From a02a46e454c63ab14e7a9351a69c012b81416f68 Mon Sep 17 00:00:00 2001 From: katexochen <49727155+katexochen@users.noreply.github.com> Date: Mon, 1 Aug 2022 16:51:34 +0200 Subject: [PATCH] Use multiple loadbalancers on GCP --- bootstrapper/cloudprovider/azure/metadata.go | 7 +- .../cloudprovider/azure/metadata_test.go | 6 +- bootstrapper/cloudprovider/gcp/client.go | 13 +- bootstrapper/cloudprovider/gcp/client_test.go | 32 +- bootstrapper/cloudprovider/gcp/metadata.go | 10 +- .../cloudprovider/gcp/metadata_test.go | 6 +- bootstrapper/cloudprovider/qemu/metadata.go | 6 +- bootstrapper/cmd/bootstrapper/main.go | 36 +- bootstrapper/cmd/bootstrapper/run.go | 8 +- bootstrapper/cmd/bootstrapper/test.go | 4 + .../internal/initserver/initserver.go | 6 +- .../internal/kubernetes/cloud_provider.go | 12 +- .../kubernetes/k8sapi/kubeadm_config.go | 6 +- .../internal/kubernetes/kubernetes.go | 81 +-- .../internal/kubernetes/kubernetes_test.go | 16 +- cli/internal/azure/client/client.go | 4 +- cli/internal/azure/client/client_test.go | 4 +- cli/internal/azure/loadbalancer.go | 12 +- cli/internal/cloudcmd/clients.go | 4 +- cli/internal/cloudcmd/clients_test.go | 30 +- cli/internal/cloudcmd/create.go | 2 +- cli/internal/cloudcmd/create_test.go | 4 +- cli/internal/cloudcmd/rollback.go | 2 +- cli/internal/cloudcmd/terminate.go | 2 +- cli/internal/cmd/create.go | 2 +- cli/internal/cmd/create_test.go | 2 +- cli/internal/cmd/init.go | 1 + cli/internal/cmd/verify_test.go | 4 +- cli/internal/gcp/client/api.go | 25 +- cli/internal/gcp/client/api_test.go | 45 ++ cli/internal/gcp/client/client.go | 42 +- cli/internal/gcp/client/client_test.go | 42 +- cli/internal/gcp/client/gcpwrappers.go | 40 +- cli/internal/gcp/client/instances.go | 25 +- cli/internal/gcp/client/loadbalancer.go | 426 ++++++++++++ cli/internal/gcp/client/loadbalancer_test.go | 628 ++++++++++++++++++ cli/internal/gcp/client/network.go | 148 ----- cli/internal/gcp/client/network_test.go | 182 ----- 
cli/internal/gcp/client/operation.go | 9 + debugd/cdbg/cmd/deploy.go | 9 +- debugd/debugd/cmd/debugd/debugd.go | 26 +- debugd/debugd/constants.go | 1 - debugd/debugd/deploy/download.go | 5 +- debugd/debugd/deploy/download_test.go | 6 +- .../metadata/cloudprovider/cloudprovider.go | 22 + .../cloudprovider/cloudprovider_test.go | 64 +- debugd/debugd/metadata/fallback/fallback.go | 5 + debugd/debugd/metadata/scheduler.go | 1 + debugd/debugd/metadata/scheduler_test.go | 4 + debugd/debugd/server/server.go | 12 +- debugd/debugd/server/server_test.go | 12 +- go.mod | 2 +- go.sum | 3 +- internal/constants/constants.go | 8 +- internal/iproute/route.go | 44 ++ internal/state/state.go | 13 +- mount/kms/constellation.go | 2 +- state/internal/setup/setup.go | 6 +- verify/server/server.go | 7 +- 59 files changed, 1629 insertions(+), 557 deletions(-) create mode 100644 cli/internal/gcp/client/loadbalancer.go create mode 100644 cli/internal/gcp/client/loadbalancer_test.go create mode 100644 internal/iproute/route.go diff --git a/bootstrapper/cloudprovider/azure/metadata.go b/bootstrapper/cloudprovider/azure/metadata.go index e1edb8525..202aaad30 100644 --- a/bootstrapper/cloudprovider/azure/metadata.go +++ b/bootstrapper/cloudprovider/azure/metadata.go @@ -247,8 +247,11 @@ func (m *Metadata) GetLoadBalancerName(ctx context.Context) (string, error) { return *lb.Name, nil } -// GetLoadBalancerIP retrieves the first load balancer IP from cloud provider metadata. -func (m *Metadata) GetLoadBalancerIP(ctx context.Context) (string, error) { +// GetLoadBalancerEndpoint retrieves the first load balancer IP from cloud provider metadata. +// +// The returned string is an IP address without a port, but the method name needs to satisfy the +// metadata interface. 
+func (m *Metadata) GetLoadBalancerEndpoint(ctx context.Context) (string, error) { lb, err := m.getLoadBalancer(ctx) if err != nil { return "", err diff --git a/bootstrapper/cloudprovider/azure/metadata_test.go b/bootstrapper/cloudprovider/azure/metadata_test.go index a4b9310e9..f2126729d 100644 --- a/bootstrapper/cloudprovider/azure/metadata_test.go +++ b/bootstrapper/cloudprovider/azure/metadata_test.go @@ -307,7 +307,7 @@ func TestGetLoadBalancerName(t *testing.T) { } } -func TestGetLoadBalancerIP(t *testing.T) { +func TestGetLoadBalancerEndpoint(t *testing.T) { loadBalancerName := "load-balancer-name" publicIP := "192.0.2.1" correctPublicIPID := "/subscriptions/subscription/resourceGroups/resourceGroup/providers/Microsoft.Network/publicIPAddresses/pubIPName" @@ -319,7 +319,7 @@ func TestGetLoadBalancerIP(t *testing.T) { wantIP string wantErr bool }{ - "GetLoadBalancerIP works": { + "GetLoadBalancerEndpoint works": { imdsAPI: newScaleSetIMDSStub(), loadBalancerAPI: &stubLoadBalancersAPI{ pager: &stubLoadBalancersClientListPager{ @@ -446,7 +446,7 @@ func TestGetLoadBalancerIP(t *testing.T) { loadBalancerAPI: tc.loadBalancerAPI, publicIPAddressesAPI: tc.publicIPAddressesAPI, } - loadbalancerName, err := metadata.GetLoadBalancerIP(context.Background()) + loadbalancerName, err := metadata.GetLoadBalancerEndpoint(context.Background()) if tc.wantErr { assert.Error(err) return diff --git a/bootstrapper/cloudprovider/gcp/client.go b/bootstrapper/cloudprovider/gcp/client.go index 334a068ed..1985346cb 100644 --- a/bootstrapper/cloudprovider/gcp/client.go +++ b/bootstrapper/cloudprovider/gcp/client.go @@ -2,7 +2,9 @@ package gcp import ( "context" + "errors" "fmt" + "net" "regexp" "strings" @@ -213,8 +215,8 @@ func (c *Client) RetrieveSubnetworkAliasCIDR(ctx context.Context, project, zone, return *(subnetwork.SecondaryIpRanges[0]).IpCidrRange, nil } -// RetrieveLoadBalancerIP returns the IP address of the load balancer specified by project, zone and loadBalancerName. 
-func (c *Client) RetrieveLoadBalancerIP(ctx context.Context, project, zone string) (string, error) { +// RetrieveLoadBalancerEndpoint returns the endpoint of the load balancer with the constellation-uid tag. +func (c *Client) RetrieveLoadBalancerEndpoint(ctx context.Context, project, zone string) (string, error) { uid, err := c.UID() if err != nil { return "", err @@ -226,8 +228,8 @@ func (c *Client) RetrieveLoadBalancerIP(ctx context.Context, project, zone strin } req := &computepb.ListForwardingRulesRequest{ - Region: region, Project: project, + Region: region, } iter := c.forwardingRulesAPI.List(ctx, req) for { @@ -239,7 +241,10 @@ func (c *Client) RetrieveLoadBalancerIP(ctx context.Context, project, zone strin return "", fmt.Errorf("retrieving load balancer IP failed: %w", err) } if resp.Labels["constellation-uid"] == uid { - return *resp.IPAddress, nil + if len(resp.Ports) == 0 { + return "", errors.New("load balancer with searched UID has no ports") + } + return net.JoinHostPort(*resp.IPAddress, resp.Ports[0]), nil } } diff --git a/bootstrapper/cloudprovider/gcp/client_test.go b/bootstrapper/cloudprovider/gcp/client_test.go index b1df81ec4..d7ca5012c 100644 --- a/bootstrapper/cloudprovider/gcp/client_test.go +++ b/bootstrapper/cloudprovider/gcp/client_test.go @@ -773,7 +773,7 @@ func TestRetrieveSubnetworkAliasCIDR(t *testing.T) { } } -func TestRetrieveLoadBalancerIP(t *testing.T) { +func TestRetrieveLoadBalancerEndpoint(t *testing.T) { loadBalancerIP := "192.0.2.1" uid := "uid" someErr := errors.New("some error") @@ -783,13 +783,14 @@ func TestRetrieveLoadBalancerIP(t *testing.T) { wantLoadBalancerIP string wantErr bool }{ - "RetrieveSubnetworkAliasCIDR works": { + "works": { stubMetadataClient: stubMetadataClient{InstanceValue: uid}, stubForwardingRulesClient: stubForwardingRulesClient{ ForwardingRuleIterator: &stubForwardingRuleIterator{ rules: []*computepb.ForwardingRule{ { IPAddress: proto.String(loadBalancerIP), + Ports: []string{"100"}, Labels: 
map[string]string{"constellation-uid": uid}, }, }, @@ -797,20 +798,36 @@ func TestRetrieveLoadBalancerIP(t *testing.T) { }, wantLoadBalancerIP: loadBalancerIP, }, - "RetrieveSubnetworkAliasCIDR fails when no matching load balancers exists": { + "fails when no matching load balancers exists": { stubMetadataClient: stubMetadataClient{InstanceValue: uid}, stubForwardingRulesClient: stubForwardingRulesClient{ ForwardingRuleIterator: &stubForwardingRuleIterator{ rules: []*computepb.ForwardingRule{ { IPAddress: proto.String(loadBalancerIP), + Ports: []string{"100"}, }, }, }, }, wantErr: true, }, - "RetrieveSubnetworkAliasCIDR fails when retrieving uid": { + "fails when retrieving uid": { + stubMetadataClient: stubMetadataClient{InstanceErr: someErr}, + stubForwardingRulesClient: stubForwardingRulesClient{ + ForwardingRuleIterator: &stubForwardingRuleIterator{ + rules: []*computepb.ForwardingRule{ + { + IPAddress: proto.String(loadBalancerIP), + Ports: []string{"100"}, + Labels: map[string]string{"constellation-uid": uid}, + }, + }, + }, + }, + wantErr: true, + }, + "fails when answer has empty port range": { stubMetadataClient: stubMetadataClient{InstanceErr: someErr}, stubForwardingRulesClient: stubForwardingRulesClient{ ForwardingRuleIterator: &stubForwardingRuleIterator{ @@ -824,7 +841,7 @@ func TestRetrieveLoadBalancerIP(t *testing.T) { }, wantErr: true, }, - "RetrieveSubnetworkAliasCIDR fails when retrieving loadbalancer IP": { + "fails when retrieving loadbalancer IP": { stubMetadataClient: stubMetadataClient{}, stubForwardingRulesClient: stubForwardingRulesClient{ ForwardingRuleIterator: &stubForwardingRuleIterator{ @@ -832,6 +849,7 @@ func TestRetrieveLoadBalancerIP(t *testing.T) { rules: []*computepb.ForwardingRule{ { IPAddress: proto.String(loadBalancerIP), + Ports: []string{"100"}, Labels: map[string]string{"constellation-uid": uid}, }, }, @@ -846,14 +864,14 @@ func TestRetrieveLoadBalancerIP(t *testing.T) { require := require.New(t) client := 
Client{forwardingRulesAPI: tc.stubForwardingRulesClient, metadataAPI: tc.stubMetadataClient} - aliasCIDR, err := client.RetrieveLoadBalancerIP(context.Background(), "project", "us-central1-a") + aliasCIDR, err := client.RetrieveLoadBalancerEndpoint(context.Background(), "project", "us-central1-a") if tc.wantErr { assert.Error(err) return } require.NoError(err) - assert.Equal(tc.wantLoadBalancerIP, aliasCIDR) + assert.Equal(tc.wantLoadBalancerIP+":100", aliasCIDR) }) } } diff --git a/bootstrapper/cloudprovider/gcp/metadata.go b/bootstrapper/cloudprovider/gcp/metadata.go index a164dda8c..81a7346e8 100644 --- a/bootstrapper/cloudprovider/gcp/metadata.go +++ b/bootstrapper/cloudprovider/gcp/metadata.go @@ -26,8 +26,8 @@ type API interface { RetrieveInstanceName() (string, error) // RetrieveSubnetworkAliasCIDR retrieves the subnetwork CIDR of the current instance. RetrieveSubnetworkAliasCIDR(ctx context.Context, project, zone, instanceName string) (string, error) - // RetrieveLoadBalancerIP retrieves the load balancer IP of the current instance. - RetrieveLoadBalancerIP(ctx context.Context, project, zone string) (string, error) + // RetrieveLoadBalancerEndpoint retrieves the load balancer endpoint of the current instance. + RetrieveLoadBalancerEndpoint(ctx context.Context, project, zone string) (string, error) // SetInstanceMetadata sets metadata key: value of the instance specified by project, zone and instanceName. SetInstanceMetadata(ctx context.Context, project, zone, instanceName, key, value string) error // UnsetInstanceMetadata removes a metadata key-value pair of the instance specified by project, zone and instanceName. @@ -111,8 +111,8 @@ func (m *Metadata) SupportsLoadBalancer() bool { return true } -// GetLoadBalancerIP returns the IP of the load balancer. -func (m *Metadata) GetLoadBalancerIP(ctx context.Context) (string, error) { +// GetLoadBalancerEndpoint returns the endpoint of the load balancer. 
+func (m *Metadata) GetLoadBalancerEndpoint(ctx context.Context) (string, error) { project, err := m.api.RetrieveProjectID() if err != nil { return "", err @@ -121,7 +121,7 @@ func (m *Metadata) GetLoadBalancerIP(ctx context.Context) (string, error) { if err != nil { return "", err } - return m.api.RetrieveLoadBalancerIP(ctx, project, zone) + return m.api.RetrieveLoadBalancerEndpoint(ctx, project, zone) } // UID retrieves the UID of the constellation. diff --git a/bootstrapper/cloudprovider/gcp/metadata_test.go b/bootstrapper/cloudprovider/gcp/metadata_test.go index e3a48b58e..f10c5347e 100644 --- a/bootstrapper/cloudprovider/gcp/metadata_test.go +++ b/bootstrapper/cloudprovider/gcp/metadata_test.go @@ -239,7 +239,7 @@ type stubGCPClient struct { retrieveInstancesErr error retrieveInstanceMetadaValues map[string]string retrieveInstanceMetadataErr error - retrieveSubentworkAliasErr error + retrieveSubnetworkAliasErr error projectID string zone string instanceName string @@ -287,7 +287,7 @@ func (s *stubGCPClient) RetrieveInstanceName() (string, error) { return s.instanceName, s.retrieveInstanceNameErr } -func (s *stubGCPClient) RetrieveLoadBalancerIP(ctx context.Context, project, zone string) (string, error) { +func (s *stubGCPClient) RetrieveLoadBalancerEndpoint(ctx context.Context, project, zone string) (string, error) { return s.loadBalancerIP, s.retrieveLoadBalancerErr } @@ -315,5 +315,5 @@ func (s *stubGCPClient) UnsetInstanceMetadata(ctx context.Context, project, zone } func (s *stubGCPClient) RetrieveSubnetworkAliasCIDR(ctx context.Context, project, zone, instanceName string) (string, error) { - return "", s.retrieveSubentworkAliasErr + return "", s.retrieveSubnetworkAliasErr } diff --git a/bootstrapper/cloudprovider/qemu/metadata.go b/bootstrapper/cloudprovider/qemu/metadata.go index e0203f412..c87a2a64e 100644 --- a/bootstrapper/cloudprovider/qemu/metadata.go +++ b/bootstrapper/cloudprovider/qemu/metadata.go @@ -65,9 +65,9 @@ func (m Metadata) 
SupportsLoadBalancer() bool { return false } -// GetLoadBalancerIP returns the IP of the load balancer. -func (m Metadata) GetLoadBalancerIP(ctx context.Context) (string, error) { - panic("function *Metadata.GetLoadBalancerIP not implemented") +// GetLoadBalancerEndpoint returns the endpoint of the load balancer. +func (m Metadata) GetLoadBalancerEndpoint(ctx context.Context) (string, error) { + panic("function *Metadata.GetLoadBalancerEndpoint not implemented") } // UID returns the UID of the constellation. diff --git a/bootstrapper/cmd/bootstrapper/main.go b/bootstrapper/cmd/bootstrapper/main.go index 3d05812f4..f45bda6ed 100644 --- a/bootstrapper/cmd/bootstrapper/main.go +++ b/bootstrapper/cmd/bootstrapper/main.go @@ -5,13 +5,14 @@ import ( "encoding/json" "flag" "io" + "net" "os" + "strconv" "strings" azurecloud "github.com/edgelesssys/constellation/bootstrapper/cloudprovider/azure" gcpcloud "github.com/edgelesssys/constellation/bootstrapper/cloudprovider/gcp" qemucloud "github.com/edgelesssys/constellation/bootstrapper/cloudprovider/qemu" - "github.com/edgelesssys/constellation/bootstrapper/internal/joinclient" "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes" "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi" "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/kubectl" @@ -22,7 +23,9 @@ import ( "github.com/edgelesssys/constellation/internal/attestation/qemu" "github.com/edgelesssys/constellation/internal/attestation/simulator" "github.com/edgelesssys/constellation/internal/attestation/vtpm" + "github.com/edgelesssys/constellation/internal/constants" "github.com/edgelesssys/constellation/internal/file" + "github.com/edgelesssys/constellation/internal/iproute" "github.com/edgelesssys/constellation/internal/logger" "github.com/edgelesssys/constellation/internal/oid" "github.com/spf13/afero" @@ -30,8 +33,6 @@ import ( ) const ( - defaultIP = "0.0.0.0" - defaultPort = "9000" // 
ConstellationCSP is the environment variable stating which Cloud Service Provider Constellation is running on. constellationCSP = "CONSTEL_CSP" ) @@ -52,9 +53,10 @@ func main() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var bindIP, bindPort string + bindIP := "0.0.0.0" + bindPort := strconv.Itoa(constants.BootstrapperPort) var clusterInitJoiner clusterInitJoiner - var metadataAPI joinclient.MetadataAPI + var metadataAPI metadataAPI var cloudLogger logging.CloudLogger var issuer atls.Issuer var openTPM vtpm.TPMOpenFunc @@ -93,10 +95,12 @@ func main() { "gcp", k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New(), &gcpcloud.CloudControllerManager{}, &gcpcloud.CloudNodeManager{}, &gcpcloud.Autoscaler{}, metadata, pcrsJSON, ) - bindIP = defaultIP - bindPort = defaultPort openTPM = vtpm.OpenVTPM fs = afero.NewOsFs() + if err := setLoadbalancerRoute(ctx, metadata); err != nil { + log.With(zap.Error(err)).Fatalf("Failed to set loadbalancer route") + } + log.Infof("Added load balancer IP to routing table") case "azure": pcrs, err := vtpm.GetSelectedPCRs(vtpm.OpenVTPM, vtpm.AzurePCRSelection) if err != nil { @@ -123,8 +127,6 @@ func main() { &azurecloud.CloudNodeManager{}, &azurecloud.Autoscaler{}, metadata, pcrsJSON, ) - bindIP = defaultIP - bindPort = defaultPort openTPM = vtpm.OpenVTPM fs = afero.NewOsFs() case "qemu": @@ -147,8 +149,6 @@ func main() { ) metadataAPI = metadata - bindIP = defaultIP - bindPort = defaultPort openTPM = vtpm.OpenVTPM fs = afero.NewOsFs() default: @@ -156,8 +156,6 @@ func main() { clusterInitJoiner = &clusterFake{} metadataAPI = &providerMetadataFake{} cloudLogger = &logging.NopLogger{} - bindIP = defaultIP - bindPort = defaultPort var simulatedTPMCloser io.Closer openTPM, simulatedTPMCloser = simulator.NewSimulatedTPMOpenFunc() defer simulatedTPMCloser.Close() @@ -168,3 +166,15 @@ func main() { run(issuer, openTPM, fileHandler, clusterInitJoiner, metadataAPI, bindIP, bindPort, log, 
cloudLogger) } + +func setLoadbalancerRoute(ctx context.Context, meta metadataAPI) error { + endpoint, err := meta.GetLoadBalancerEndpoint(ctx) + if err != nil { + return err + } + ip, _, err := net.SplitHostPort(endpoint) + if err != nil { + return err + } + return iproute.AddToLocalRoutingTable(ctx, ip) +} diff --git a/bootstrapper/cmd/bootstrapper/run.go b/bootstrapper/cmd/bootstrapper/run.go index e5a0b0aba..79ca5f1de 100644 --- a/bootstrapper/cmd/bootstrapper/run.go +++ b/bootstrapper/cmd/bootstrapper/run.go @@ -1,6 +1,7 @@ package main import ( + "context" "net" "github.com/edgelesssys/constellation/bootstrapper/internal/clean" @@ -20,7 +21,7 @@ import ( var version = "0.0.0" func run(issuer quoteIssuer, tpm vtpm.TPMOpenFunc, fileHandler file.Handler, - kube clusterInitJoiner, metadata joinclient.MetadataAPI, + kube clusterInitJoiner, metadata metadataAPI, bindIP, bindPort string, log *logger.Logger, cloudLogger logging.CloudLogger, ) { @@ -90,3 +91,8 @@ type quoteIssuer interface { // Issue issues a quote for remote attestation for a given message Issue(userData []byte, nonce []byte) (quote []byte, err error) } + +type metadataAPI interface { + joinclient.MetadataAPI + GetLoadBalancerEndpoint(ctx context.Context) (string, error) +} diff --git a/bootstrapper/cmd/bootstrapper/test.go b/bootstrapper/cmd/bootstrapper/test.go index dc8afedfe..f336b299f 100644 --- a/bootstrapper/cmd/bootstrapper/test.go +++ b/bootstrapper/cmd/bootstrapper/test.go @@ -55,6 +55,10 @@ func (f *providerMetadataFake) SetVPNIP(ctx context.Context, vpnIP string) error return nil } +func (f *providerMetadataFake) GetLoadBalancerEndpoint(ctx context.Context) (string, error) { + return "", nil +} + func (f *providerMetadataFake) Supported() bool { return true } diff --git a/bootstrapper/internal/initserver/initserver.go b/bootstrapper/internal/initserver/initserver.go index 91aea9f28..73acdbcd7 100644 --- a/bootstrapper/internal/initserver/initserver.go +++ 
b/bootstrapper/internal/initserver/initserver.go @@ -5,6 +5,7 @@ import ( "fmt" "net" "strings" + "time" "github.com/edgelesssys/constellation/bootstrapper/initproto" "github.com/edgelesssys/constellation/bootstrapper/internal/diskencryption" @@ -21,6 +22,7 @@ import ( "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" ) @@ -53,12 +55,12 @@ func New(lock locker, kube ClusterInitializer, issuer atls.Issuer, fh file.Handl grpcServer := grpc.NewServer( grpc.Creds(atlscredentials.New(issuer, nil)), + grpc.KeepaliveParams(keepalive.ServerParameters{Time: 15 * time.Second}), log.Named("gRPC").GetServerUnaryInterceptor(), ) initproto.RegisterAPIServer(grpcServer, server) server.grpcServer = grpcServer - return server } @@ -69,6 +71,8 @@ func (s *Server) Serve(ip, port string, cleaner cleaner) error { if err != nil { return fmt.Errorf("failed to listen: %w", err) } + + s.log.Infof("Starting") return s.grpcServer.Serve(lis) } diff --git a/bootstrapper/internal/kubernetes/cloud_provider.go b/bootstrapper/internal/kubernetes/cloud_provider.go index 5a650a5e6..8418f64ea 100644 --- a/bootstrapper/internal/kubernetes/cloud_provider.go +++ b/bootstrapper/internal/kubernetes/cloud_provider.go @@ -21,8 +21,8 @@ type ProviderMetadata interface { GetSubnetworkCIDR(ctx context.Context) (string, error) // SupportsLoadBalancer returns true if the cloud provider supports load balancers. SupportsLoadBalancer() bool - // GetLoadBalancerIP retrieves the load balancer IP. - GetLoadBalancerIP(ctx context.Context) (string, error) + // GetLoadBalancerEndpoint retrieves the load balancer endpoint. + GetLoadBalancerEndpoint(ctx context.Context) (string, error) // GetInstance retrieves an instance using its providerID. GetInstance(ctx context.Context, providerID string) (metadata.InstanceMetadata, error) // Supported is used to determine if metadata API is implemented for this cloud provider. 
@@ -85,8 +85,8 @@ type ClusterAutoscaler interface { } type stubProviderMetadata struct { - GetLoadBalancerIPErr error - GetLoadBalancerIPResp string + GetLoadBalancerEndpointErr error + GetLoadBalancerEndpointResp string GetSubnetworkCIDRErr error GetSubnetworkCIDRResp string @@ -107,8 +107,8 @@ type stubProviderMetadata struct { UIDResp string } -func (m *stubProviderMetadata) GetLoadBalancerIP(ctx context.Context) (string, error) { - return m.GetLoadBalancerIPResp, m.GetLoadBalancerIPErr +func (m *stubProviderMetadata) GetLoadBalancerEndpoint(ctx context.Context) (string, error) { + return m.GetLoadBalancerEndpointResp, m.GetLoadBalancerEndpointErr } func (m *stubProviderMetadata) GetSubnetworkCIDR(ctx context.Context) (string, error) { diff --git a/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go b/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go index 4614e2098..f4f61fdca 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go +++ b/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go @@ -5,6 +5,7 @@ import ( "github.com/edgelesssys/constellation/bootstrapper/internal/kubelet" "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources" + "github.com/edgelesssys/constellation/internal/constants" "github.com/edgelesssys/constellation/internal/versions" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -17,7 +18,6 @@ import ( // Slimmed down to the fields we require const ( - bindPort = 6443 auditLogDir = "/var/log/kubernetes/audit/" auditLogFile = "audit.log" auditPolicyPath = "/etc/kubernetes/audit-policy.yaml" @@ -45,7 +45,7 @@ func (c *CoreOSConfiguration) InitConfiguration(externalCloudProvider bool, k8sV }, // AdvertiseAddress will be overwritten later LocalAPIEndpoint: kubeadm.APIEndpoint{ - BindPort: bindPort, + BindPort: constants.KubernetesPort, }, }, // https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3#ClusterConfiguration @@ -216,7 
+216,7 @@ func (k *KubeadmJoinYAML) SetControlPlane(advertiseAddress string) { k.JoinConfiguration.ControlPlane = &kubeadm.JoinControlPlane{ LocalAPIEndpoint: kubeadm.APIEndpoint{ AdvertiseAddress: advertiseAddress, - BindPort: 6443, + BindPort: constants.KubernetesPort, }, } k.JoinConfiguration.SkipPhases = []string{"control-plane-prepare/download-certs"} diff --git a/bootstrapper/internal/kubernetes/kubernetes.go b/bootstrapper/internal/kubernetes/kubernetes.go index 6c7fe1fc7..8c1e6770a 100644 --- a/bootstrapper/internal/kubernetes/kubernetes.go +++ b/bootstrapper/internal/kubernetes/kubernetes.go @@ -3,10 +3,8 @@ package kubernetes import ( "context" "encoding/json" - "errors" "fmt" "net" - "os/exec" "strings" "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi" @@ -15,6 +13,7 @@ import ( "github.com/edgelesssys/constellation/bootstrapper/util" "github.com/edgelesssys/constellation/internal/cloud/metadata" "github.com/edgelesssys/constellation/internal/constants" + "github.com/edgelesssys/constellation/internal/iproute" "github.com/edgelesssys/constellation/internal/logger" "github.com/edgelesssys/constellation/internal/versions" "github.com/spf13/afero" @@ -93,7 +92,7 @@ func (k *KubeWrapper) InitCluster( var publicIP string var nodePodCIDR string var subnetworkPodCIDR string - var controlPlaneEndpointIP string // this is the IP in "kubeadm init --control-plane-endpoint=:" hence the unfortunate name + var controlPlaneEndpoint string // this is the endpoint in "kubeadm init --control-plane-endpoint=:" var nodeIP string var validIPs []net.IP @@ -102,7 +101,7 @@ func (k *KubeWrapper) InitCluster( log.Infof("Retrieving node metadata") instance, err = k.providerMetadata.Self(ctx) if err != nil { - return nil, fmt.Errorf("retrieving own instance metadata failed: %w", err) + return nil, fmt.Errorf("retrieving own instance metadata: %w", err) } if instance.VPCIP != "" { validIPs = append(validIPs, net.ParseIP(instance.VPCIP)) @@ -120,18 
+119,13 @@ func (k *KubeWrapper) InitCluster( } subnetworkPodCIDR, err = k.providerMetadata.GetSubnetworkCIDR(ctx) if err != nil { - return nil, fmt.Errorf("retrieving subnetwork CIDR failed: %w", err) + return nil, fmt.Errorf("retrieving subnetwork CIDR: %w", err) } - controlPlaneEndpointIP = publicIP + controlPlaneEndpoint = publicIP if k.providerMetadata.SupportsLoadBalancer() { - controlPlaneEndpointIP, err = k.providerMetadata.GetLoadBalancerIP(ctx) + controlPlaneEndpoint, err = k.providerMetadata.GetLoadBalancerEndpoint(ctx) if err != nil { - return nil, fmt.Errorf("retrieving load balancer IP failed: %w", err) - } - if k.cloudProvider == "gcp" { - if err := manuallySetLoadbalancerIP(ctx, controlPlaneEndpointIP); err != nil { - return nil, fmt.Errorf("setting load balancer IP failed: %w", err) - } + return nil, fmt.Errorf("retrieving load balancer endpoint: %w", err) } } } @@ -139,7 +133,7 @@ zap.String("nodeName", nodeName), zap.String("providerID", providerID), zap.String("nodeIP", nodeIP), - zap.String("controlPlaneEndpointIP", controlPlaneEndpointIP), + zap.String("controlPlaneEndpoint", controlPlaneEndpoint), zap.String("podCIDR", subnetworkPodCIDR), ).Infof("Setting information for node") @@ -149,7 +143,7 @@ initConfig.SetCertSANs([]string{publicIP, nodeIP}) initConfig.SetNodeName(nodeName) initConfig.SetProviderID(providerID) - initConfig.SetControlPlaneEndpoint(controlPlaneEndpointIP) + initConfig.SetControlPlaneEndpoint(controlPlaneEndpoint) initConfigYAML, err := initConfig.Marshal() if err != nil { return nil, fmt.Errorf("encoding kubeadm init configuration as YAML: %w", err) } @@ -249,15 +243,20 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo } nodeName := nodeInternalIP var providerID string + var loadbalancerEndpoint string if k.providerMetadata.Supported() { log.Infof("Retrieving node metadata") instance, err :=
k.providerMetadata.Self(ctx) if err != nil { - return fmt.Errorf("retrieving own instance metadata failed: %w", err) + return fmt.Errorf("retrieving own instance metadata: %w", err) } providerID = instance.ProviderID nodeName = instance.Name nodeInternalIP = instance.VPCIP + loadbalancerEndpoint, err = k.providerMetadata.GetLoadBalancerEndpoint(ctx) + if err != nil { + return fmt.Errorf("retrieving loadbalancer endpoint: %w", err) + } } nodeName = k8sCompliantHostname(nodeName) @@ -267,8 +266,19 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo zap.String("nodeIP", nodeInternalIP), ).Infof("Setting information for node") - // Step 2: configure kubeadm join config + // Step 2: Remove load balancer from local routing table on GCP. + if k.cloudProvider == "gcp" { + ip, _, err := net.SplitHostPort(loadbalancerEndpoint) + if err != nil { + return fmt.Errorf("parsing load balancer IP: %w", err) + } + if err := iproute.RemoveFromLocalRoutingTable(ctx, ip); err != nil { + return fmt.Errorf("removing load balancer IP from routing table: %w", err) + } + log.Infof("Removed load balancer IP from routing table") + } + // Step 3: configure kubeadm join config joinConfig := k.configProvider.JoinConfiguration(k.cloudControllerManager.Supported()) joinConfig.SetAPIServerEndpoint(args.APIServerEndpoint) joinConfig.SetToken(args.Token) @@ -319,15 +329,15 @@ func (k *KubeWrapper) setupCCM(ctx context.Context, subnetworkPodCIDR, cloudServ } ccmConfigMaps, err := k.cloudControllerManager.ConfigMaps(instance) if err != nil { - return fmt.Errorf("defining ConfigMaps for CCM failed: %w", err) + return fmt.Errorf("defining ConfigMaps for CCM: %w", err) } ccmSecrets, err := k.cloudControllerManager.Secrets(ctx, instance.ProviderID, cloudServiceAccountURI) if err != nil { - return fmt.Errorf("defining Secrets for CCM failed: %w", err) + return fmt.Errorf("defining Secrets for CCM: %w", err) } ccmImage, err := k.cloudControllerManager.Image(k8sVersion) if err 
!= nil { - return fmt.Errorf("defining Image for CCM failed: %w", err) + return fmt.Errorf("defining Image for CCM: %w", err) } cloudControllerManagerConfiguration := resources.NewDefaultCloudControllerManagerDeployment( @@ -335,7 +345,7 @@ func (k *KubeWrapper) setupCCM(ctx context.Context, subnetworkPodCIDR, cloudServ k.cloudControllerManager.ExtraArgs(), k.cloudControllerManager.Volumes(), k.cloudControllerManager.VolumeMounts(), k.cloudControllerManager.Env(), ) if err := k.clusterUtil.SetupCloudControllerManager(k.client, cloudControllerManagerConfiguration, ccmConfigMaps, ccmSecrets); err != nil { - return fmt.Errorf("failed to setup cloud-controller-manager: %w", err) + return fmt.Errorf("setting up cloud-controller-manager: %w", err) } return nil @@ -347,14 +357,14 @@ func (k *KubeWrapper) setupCloudNodeManager(k8sVersion versions.ValidK8sVersion) } nodeManagerImage, err := k.cloudNodeManager.Image(k8sVersion) if err != nil { - return fmt.Errorf("defining Image for Node Manager failed: %w", err) + return fmt.Errorf("defining Image for Node Manager: %w", err) } cloudNodeManagerConfiguration := resources.NewDefaultCloudNodeManagerDeployment( nodeManagerImage, k.cloudNodeManager.Path(), k.cloudNodeManager.ExtraArgs(), ) if err := k.clusterUtil.SetupCloudNodeManager(k.client, cloudNodeManagerConfiguration); err != nil { - return fmt.Errorf("failed to setup cloud-node-manager: %w", err) + return fmt.Errorf("setting up cloud-node-manager: %w", err) } return nil @@ -366,13 +376,13 @@ func (k *KubeWrapper) setupClusterAutoscaler(instance metadata.InstanceMetadata, } caSecrets, err := k.clusterAutoscaler.Secrets(instance.ProviderID, cloudServiceAccountURI) if err != nil { - return fmt.Errorf("defining Secrets for cluster-autoscaler failed: %w", err) + return fmt.Errorf("defining Secrets for cluster-autoscaler: %w", err) } clusterAutoscalerConfiguration := resources.NewDefaultAutoscalerDeployment(k.clusterAutoscaler.Volumes(), k.clusterAutoscaler.VolumeMounts(), 
k.clusterAutoscaler.Env(), k8sVersion) clusterAutoscalerConfiguration.SetAutoscalerCommand(k.clusterAutoscaler.Name(), autoscalingNodeGroups) if err := k.clusterUtil.SetupAutoscaling(k.client, clusterAutoscalerConfiguration, caSecrets); err != nil { - return fmt.Errorf("failed to setup cluster-autoscaler: %w", err) + return fmt.Errorf("setting up cluster-autoscaler: %w", err) } return nil @@ -425,27 +435,6 @@ func (k *KubeWrapper) setupOperators(ctx context.Context) error { return nil } -// manuallySetLoadbalancerIP sets the loadbalancer IP of the first control plane during init. -// The GCP guest agent does this usually, but is deployed in the cluster that doesn't exist -// at this point. This is a workaround to set the loadbalancer IP manually, so kubeadm and kubelet -// can talk to the local Kubernetes API server using the loadbalancer IP. -func manuallySetLoadbalancerIP(ctx context.Context, ip string) error { - // https://github.com/GoogleCloudPlatform/guest-agent/blob/792fce795218633bcbde505fb3457a0b24f26d37/google_guest_agent/addresses.go#L179 - if !strings.Contains(ip, "/") { - ip = ip + "/32" - } - args := []string{"route", "add", "to", "local", ip, "scope", "host", "dev", "ens3", "proto", "66"} - _, err := exec.CommandContext(ctx, "ip", args...).Output() - if err != nil { - var exitErr *exec.ExitError - if errors.As(err, &exitErr) { - return fmt.Errorf("ip route add (code %v) with: %s", exitErr.ExitCode(), exitErr.Stderr) - } - return fmt.Errorf("ip route add: %w", err) - } - return nil -} - // k8sCompliantHostname transforms a hostname to an RFC 1123 compliant, lowercase subdomain as required by Kubernetes node names. // The following regex is used by k8s for validation: /^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$/ . // Only a simple heuristic is used for now (to lowercase, replace underscores). 
diff --git a/bootstrapper/internal/kubernetes/kubernetes_test.go b/bootstrapper/internal/kubernetes/kubernetes_test.go index 213d487e9..fd5a320d5 100644 --- a/bootstrapper/internal/kubernetes/kubernetes_test.go +++ b/bootstrapper/internal/kubernetes/kubernetes_test.go @@ -5,12 +5,14 @@ import ( "errors" "net" "regexp" + "strconv" "testing" "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi" "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources" "github.com/edgelesssys/constellation/bootstrapper/role" "github.com/edgelesssys/constellation/internal/cloud/metadata" + "github.com/edgelesssys/constellation/internal/constants" "github.com/edgelesssys/constellation/internal/logger" "github.com/edgelesssys/constellation/internal/versions" "github.com/stretchr/testify/assert" @@ -86,8 +88,8 @@ func TestInitCluster(t *testing.T) { PublicIP: publicIP, AliasIPRanges: []string{aliasIPRange}, }, - GetLoadBalancerIPResp: loadbalancerIP, - SupportsLoadBalancerResp: true, + GetLoadBalancerEndpointResp: loadbalancerIP, + SupportsLoadBalancerResp: true, }, CloudControllerManager: &stubCloudControllerManager{}, CloudNodeManager: &stubCloudNodeManager{SupportedResp: false}, @@ -148,9 +150,9 @@ func TestInitCluster(t *testing.T) { Kubeconfig: []byte("someKubeconfig"), }, providerMetadata: &stubProviderMetadata{ - GetLoadBalancerIPErr: someErr, - SupportsLoadBalancerResp: true, - SupportedResp: true, + GetLoadBalancerEndpointErr: someErr, + SupportsLoadBalancerResp: true, + SupportedResp: true, }, CloudControllerManager: &stubCloudControllerManager{}, CloudNodeManager: &stubCloudNodeManager{}, @@ -319,7 +321,7 @@ func TestInitCluster(t *testing.T) { func TestJoinCluster(t *testing.T) { someErr := errors.New("failed") joinCommand := &kubeadm.BootstrapTokenDiscovery{ - APIServerEndpoint: "192.0.2.0:6443", + APIServerEndpoint: "192.0.2.0:" + strconv.Itoa(constants.KubernetesPort), Token: "kube-fake-token", CACertHashes: 
[]string{"sha256:a60ebe9b0879090edd83b40a4df4bebb20506bac1e51d518ff8f4505a721930f"}, } @@ -419,7 +421,7 @@ func TestJoinCluster(t *testing.T) { ControlPlane: &kubeadm.JoinControlPlane{ LocalAPIEndpoint: kubeadm.APIEndpoint{ AdvertiseAddress: "192.0.2.1", - BindPort: 6443, + BindPort: constants.KubernetesPort, }, }, SkipPhases: []string{"control-plane-prepare/download-certs"}, diff --git a/cli/internal/azure/client/client.go b/cli/internal/azure/client/client.go index e433cb0e4..02b8334ca 100644 --- a/cli/internal/azure/client/client.go +++ b/cli/internal/azure/client/client.go @@ -172,7 +172,7 @@ func (c *Client) GetState() state.ConstellationState { Name: c.name, UID: c.uid, CloudProvider: cloudprovider.Azure.String(), - BootstrapperHost: c.loadBalancerPubIP, + LoadBalancerIP: c.loadBalancerPubIP, AzureLocation: c.location, AzureSubscription: c.subscriptionID, AzureTenant: c.tenantID, @@ -192,7 +192,7 @@ func (c *Client) SetState(stat state.ConstellationState) { c.resourceGroup = stat.AzureResourceGroup c.name = stat.Name c.uid = stat.UID - c.loadBalancerPubIP = stat.BootstrapperHost + c.loadBalancerPubIP = stat.LoadBalancerIP c.location = stat.AzureLocation c.subscriptionID = stat.AzureSubscription c.tenantID = stat.AzureTenant diff --git a/cli/internal/azure/client/client_test.go b/cli/internal/azure/client/client_test.go index 13c321f45..50ac80bee 100644 --- a/cli/internal/azure/client/client_test.go +++ b/cli/internal/azure/client/client_test.go @@ -26,7 +26,7 @@ func TestSetGetState(t *testing.T) { }, Name: "name", UID: "uid", - BootstrapperHost: "bootstrapper-host", + LoadBalancerIP: "bootstrapper-host", AzureResourceGroup: "resource-group", AzureLocation: "location", AzureSubscription: "subscription", @@ -64,7 +64,7 @@ func TestSetGetState(t *testing.T) { controlPlanes: state.AzureControlPlaneInstances, name: state.Name, uid: state.UID, - loadBalancerPubIP: state.BootstrapperHost, + loadBalancerPubIP: state.LoadBalancerIP, resourceGroup: 
state.AzureResourceGroup, location: state.AzureLocation, subscriptionID: state.AzureSubscription, diff --git a/cli/internal/azure/loadbalancer.go b/cli/internal/azure/loadbalancer.go index c0993ff85..be95122c0 100644 --- a/cli/internal/azure/loadbalancer.go +++ b/cli/internal/azure/loadbalancer.go @@ -62,7 +62,7 @@ func (l LoadBalancer) Azure() armnetwork.LoadBalancer { Name: to.Ptr(kubeHealthProbeName), Properties: &armnetwork.ProbePropertiesFormat{ Protocol: to.Ptr(armnetwork.ProbeProtocolTCP), - Port: to.Ptr(int32(6443)), + Port: to.Ptr(int32(constants.KubernetesPort)), }, }, { @@ -83,7 +83,7 @@ func (l LoadBalancer) Azure() armnetwork.LoadBalancer { Name: to.Ptr(debugdHealthProbeName), Properties: &armnetwork.ProbePropertiesFormat{ Protocol: to.Ptr(armnetwork.ProbeProtocolTCP), - Port: to.Ptr[int32](4000), + Port: to.Ptr[int32](constants.DebugdPort), }, }, }, @@ -94,8 +94,8 @@ func (l LoadBalancer) Azure() armnetwork.LoadBalancer { FrontendIPConfiguration: &armnetwork.SubResource{ ID: to.Ptr("/subscriptions/" + l.Subscription + "/resourceGroups/" + l.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + l.Name + "/frontendIPConfigurations/" + frontEndIPConfigName), }, - FrontendPort: to.Ptr[int32](6443), - BackendPort: to.Ptr[int32](6443), + FrontendPort: to.Ptr[int32](constants.KubernetesPort), + BackendPort: to.Ptr[int32](constants.KubernetesPort), Protocol: to.Ptr(armnetwork.TransportProtocolTCP), Probe: &armnetwork.SubResource{ ID: to.Ptr("/subscriptions/" + l.Subscription + "/resourceGroups/" + l.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + l.Name + "/probes/" + kubeHealthProbeName), @@ -154,8 +154,8 @@ func (l LoadBalancer) Azure() armnetwork.LoadBalancer { FrontendIPConfiguration: &armnetwork.SubResource{ ID: to.Ptr("/subscriptions/" + l.Subscription + "/resourceGroups/" + l.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + l.Name + "/frontendIPConfigurations/" + frontEndIPConfigName), }, - FrontendPort: 
to.Ptr[int32](4000), - BackendPort: to.Ptr[int32](4000), + FrontendPort: to.Ptr[int32](constants.DebugdPort), + BackendPort: to.Ptr[int32](constants.DebugdPort), Protocol: to.Ptr(armnetwork.TransportProtocolTCP), Probe: &armnetwork.SubResource{ ID: to.Ptr("/subscriptions/" + l.Subscription + "/resourceGroups/" + l.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + l.Name + "/probes/" + debugdHealthProbeName), diff --git a/cli/internal/cloudcmd/clients.go b/cli/internal/cloudcmd/clients.go index 882c1b9f9..202a78c71 100644 --- a/cli/internal/cloudcmd/clients.go +++ b/cli/internal/cloudcmd/clients.go @@ -14,11 +14,11 @@ type gcpclient interface { CreateVPCs(ctx context.Context) error CreateFirewall(ctx context.Context, input gcpcl.FirewallInput) error CreateInstances(ctx context.Context, input gcpcl.CreateInstancesInput) error - CreateLoadBalancer(ctx context.Context) error + CreateLoadBalancers(ctx context.Context) error CreateServiceAccount(ctx context.Context, input gcpcl.ServiceAccountInput) (string, error) TerminateFirewall(ctx context.Context) error TerminateVPCs(context.Context) error - TerminateLoadBalancer(context.Context) error + TerminateLoadBalancers(context.Context) error TerminateInstances(context.Context) error TerminateServiceAccount(ctx context.Context) error Close() error diff --git a/cli/internal/cloudcmd/clients_test.go b/cli/internal/cloudcmd/clients_test.go index 0ddd2906a..155540b70 100644 --- a/cli/internal/cloudcmd/clients_test.go +++ b/cli/internal/cloudcmd/clients_test.go @@ -245,11 +245,7 @@ type fakeGcpClient struct { name string zone string serviceAccount string - - // loadbalancer - healthCheck string - backendService string - forwardingRule string + loadbalancers []string } func (c *fakeGcpClient) GetState() state.ConstellationState { @@ -264,14 +260,12 @@ func (c *fakeGcpClient) GetState() state.ConstellationState { GCPNetwork: c.network, GCPSubnetwork: c.subnetwork, GCPFirewalls: c.firewalls, - GCPBackendService: 
c.backendService, - GCPHealthCheck: c.healthCheck, - GCPForwardingRule: c.forwardingRule, GCPProject: c.project, Name: c.name, UID: c.uid, GCPZone: c.zone, GCPServiceAccount: c.serviceAccount, + GCPLoadbalancers: c.loadbalancers, } } @@ -290,9 +284,7 @@ func (c *fakeGcpClient) SetState(stat state.ConstellationState) { c.uid = stat.UID c.zone = stat.GCPZone c.serviceAccount = stat.GCPServiceAccount - c.healthCheck = stat.GCPHealthCheck - c.backendService = stat.GCPBackendService - c.forwardingRule = stat.GCPForwardingRule + c.loadbalancers = stat.GCPLoadbalancers } func (c *fakeGcpClient) CreateVPCs(ctx context.Context) error { @@ -345,10 +337,8 @@ func (c *fakeGcpClient) CreateServiceAccount(ctx context.Context, input gcpcl.Se }.ToCloudServiceAccountURI(), nil } -func (c *fakeGcpClient) CreateLoadBalancer(ctx context.Context) error { - c.healthCheck = "health-check" - c.backendService = "backend-service" - c.forwardingRule = "forwarding-rule" +func (c *fakeGcpClient) CreateLoadBalancers(ctx context.Context) error { + c.loadbalancers = []string{"kube-lb", "boot-lb", "verify-lb"} return nil } @@ -384,10 +374,8 @@ func (c *fakeGcpClient) TerminateServiceAccount(context.Context) error { return nil } -func (c *fakeGcpClient) TerminateLoadBalancer(context.Context) error { - c.healthCheck = "" - c.backendService = "" - c.forwardingRule = "" +func (c *fakeGcpClient) TerminateLoadBalancers(context.Context) error { + c.loadbalancers = nil return nil } @@ -438,7 +426,7 @@ func (c *stubGcpClient) CreateServiceAccount(ctx context.Context, input gcpcl.Se return gcpshared.ServiceAccountKey{}.ToCloudServiceAccountURI(), c.createServiceAccountErr } -func (c *stubGcpClient) CreateLoadBalancer(ctx context.Context) error { +func (c *stubGcpClient) CreateLoadBalancers(ctx context.Context) error { return c.createLoadBalancerErr } @@ -462,7 +450,7 @@ func (c *stubGcpClient) TerminateServiceAccount(context.Context) error { return c.terminateServiceAccountErr } -func (c *stubGcpClient) 
TerminateLoadBalancer(context.Context) error { +func (c *stubGcpClient) TerminateLoadBalancers(context.Context) error { return c.terminateLoadBalancerErr } diff --git a/cli/internal/cloudcmd/create.go b/cli/internal/cloudcmd/create.go index 3bf4c488d..23c6b5382 100644 --- a/cli/internal/cloudcmd/create.go +++ b/cli/internal/cloudcmd/create.go @@ -133,7 +133,7 @@ func (c *Creator) createGCP(ctx context.Context, cl gcpclient, config *config.Co return state.ConstellationState{}, err } - if err := cl.CreateLoadBalancer(ctx); err != nil { + if err := cl.CreateLoadBalancers(ctx); err != nil { return state.ConstellationState{}, err } diff --git a/cli/internal/cloudcmd/create_test.go b/cli/internal/cloudcmd/create_test.go index c46fe5726..74bace924 100644 --- a/cli/internal/cloudcmd/create_test.go +++ b/cli/internal/cloudcmd/create_test.go @@ -32,9 +32,7 @@ func TestCreator(t *testing.T) { GCPControlPlaneInstanceTemplate: "controlplane-template", GCPNetwork: "network", GCPSubnetwork: "subnetwork", - GCPBackendService: "backend-service", - GCPHealthCheck: "health-check", - GCPForwardingRule: "forwarding-rule", + GCPLoadbalancers: []string{"kube-lb", "boot-lb", "verify-lb"}, GCPFirewalls: []string{ "bootstrapper", "ssh", "nodeport", "kubernetes", "allow-cluster-internal-tcp", "allow-cluster-internal-udp", "allow-cluster-internal-icmp", diff --git a/cli/internal/cloudcmd/rollback.go b/cli/internal/cloudcmd/rollback.go index 03489da6b..100c1b204 100644 --- a/cli/internal/cloudcmd/rollback.go +++ b/cli/internal/cloudcmd/rollback.go @@ -34,7 +34,7 @@ type rollbackerGCP struct { func (r *rollbackerGCP) rollback(ctx context.Context) error { var err error - err = multierr.Append(err, r.client.TerminateLoadBalancer(ctx)) + err = multierr.Append(err, r.client.TerminateLoadBalancers(ctx)) err = multierr.Append(err, r.client.TerminateInstances(ctx)) err = multierr.Append(err, r.client.TerminateFirewall(ctx)) err = multierr.Append(err, r.client.TerminateVPCs(ctx)) diff --git 
a/cli/internal/cloudcmd/terminate.go b/cli/internal/cloudcmd/terminate.go index 5ab86f47f..a40854e6b 100644 --- a/cli/internal/cloudcmd/terminate.go +++ b/cli/internal/cloudcmd/terminate.go @@ -53,7 +53,7 @@ func (t *Terminator) Terminate(ctx context.Context, state state.ConstellationSta func (t *Terminator) terminateGCP(ctx context.Context, cl gcpclient, state state.ConstellationState) error { cl.SetState(state) - if err := cl.TerminateLoadBalancer(ctx); err != nil { + if err := cl.TerminateLoadBalancers(ctx); err != nil { return err } if err := cl.TerminateInstances(ctx); err != nil { diff --git a/cli/internal/cmd/create.go b/cli/internal/cmd/create.go index 4fcebdbd5..39494a11d 100644 --- a/cli/internal/cmd/create.go +++ b/cli/internal/cmd/create.go @@ -211,7 +211,7 @@ func checkDirClean(fileHandler file.Handler) error { } func writeIPtoIDFile(fileHandler file.Handler, state state.ConstellationState) error { - ip := state.BootstrapperHost + ip := state.LoadBalancerIP if ip == "" { return fmt.Errorf("bootstrapper ip not found") } diff --git a/cli/internal/cmd/create_test.go b/cli/internal/cmd/create_test.go index c8320b6d7..a2dc7eadd 100644 --- a/cli/internal/cmd/create_test.go +++ b/cli/internal/cmd/create_test.go @@ -46,7 +46,7 @@ func TestCreateArgumentValidation(t *testing.T) { } func TestCreate(t *testing.T) { - testState := state.ConstellationState{Name: "test", BootstrapperHost: "192.0.2.1"} + testState := state.ConstellationState{Name: "test", LoadBalancerIP: "192.0.2.1"} someErr := errors.New("failed") testCases := map[string]struct { diff --git a/cli/internal/cmd/init.go b/cli/internal/cmd/init.go index af688edb9..4a514aef2 100644 --- a/cli/internal/cmd/init.go +++ b/cli/internal/cmd/init.go @@ -141,6 +141,7 @@ func initialize(cmd *cobra.Command, newDialer func(validator *cloudcmd.Validator return fmt.Errorf("loading Helm charts: %w", err) } + cmd.Println("Initializing cluster ...") req := &initproto.InitRequest{ AutoscalingNodeGroups: 
autoscalingNodeGroups, MasterSecret: flags.masterSecret.Key, diff --git a/cli/internal/cmd/verify_test.go b/cli/internal/cmd/verify_test.go index 63e4ca0ab..db511eb57 100644 --- a/cli/internal/cmd/verify_test.go +++ b/cli/internal/cmd/verify_test.go @@ -92,7 +92,7 @@ func TestVerify(t *testing.T) { nodeEndpointFlag: "192.0.2.1", ownerIDFlag: zeroBase64, protoClient: &stubVerifyClient{}, - wantEndpoint: "192.0.2.1:30081", + wantEndpoint: "192.0.2.1:" + strconv.Itoa(constants.VerifyServiceNodePortGRPC), }, "endpoint not set": { setupFs: func(require *require.Assertions) afero.Fs { return afero.NewMemMapFs() }, @@ -107,7 +107,7 @@ func TestVerify(t *testing.T) { ownerIDFlag: zeroBase64, protoClient: &stubVerifyClient{}, idFile: &clusterIDsFile{IP: "192.0.2.1"}, - wantEndpoint: "192.0.2.1:30081", + wantEndpoint: "192.0.2.1:" + strconv.Itoa(constants.VerifyServiceNodePortGRPC), }, "override endpoint from details file": { setupFs: func(require *require.Assertions) afero.Fs { return afero.NewMemMapFs() }, diff --git a/cli/internal/gcp/client/api.go b/cli/internal/gcp/client/api.go index 0c9bc6133..2f90b0020 100644 --- a/cli/internal/gcp/client/api.go +++ b/cli/internal/gcp/client/api.go @@ -105,15 +105,30 @@ type instanceGroupManagersAPI interface { type iamAPI interface { Close() error - CreateServiceAccount(ctx context.Context, req *adminpb.CreateServiceAccountRequest, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) - CreateServiceAccountKey(ctx context.Context, req *adminpb.CreateServiceAccountKeyRequest, opts ...gax.CallOption) (*adminpb.ServiceAccountKey, error) - DeleteServiceAccount(ctx context.Context, req *adminpb.DeleteServiceAccountRequest, opts ...gax.CallOption) error + CreateServiceAccount(ctx context.Context, req *adminpb.CreateServiceAccountRequest, + opts ...gax.CallOption) (*adminpb.ServiceAccount, error) + CreateServiceAccountKey(ctx context.Context, req *adminpb.CreateServiceAccountKeyRequest, + opts ...gax.CallOption) 
(*adminpb.ServiceAccountKey, error) + DeleteServiceAccount(ctx context.Context, req *adminpb.DeleteServiceAccountRequest, + opts ...gax.CallOption) error } type projectsAPI interface { Close() error - GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) - SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) + GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, + opts ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, + opts ...gax.CallOption) (*iampb.Policy, error) +} + +type addressesAPI interface { + Close() error + Insert(ctx context.Context, req *computepb.InsertAddressRequest, + opts ...gax.CallOption) (Operation, error) + Get(ctx context.Context, req *computepb.GetAddressRequest, + opts ...gax.CallOption) (*computepb.Address, error) + Delete(ctx context.Context, req *computepb.DeleteAddressRequest, + opts ...gax.CallOption) (Operation, error) } type Operation interface { diff --git a/cli/internal/gcp/client/api_test.go b/cli/internal/gcp/client/api_test.go index e5aebdbe9..c99eac479 100644 --- a/cli/internal/gcp/client/api_test.go +++ b/cli/internal/gcp/client/api_test.go @@ -548,3 +548,48 @@ func (i *stubManagedInstanceIterator) Next() (*computepb.ManagedInstance, error) i.internalCounter++ return resp, nil } + +type stubAddressesAPI struct { + insertErr error + getAddr *string + getErr error + deleteErr error +} + +func (a stubAddressesAPI) Insert(context.Context, *computepb.InsertAddressRequest, + ...gax.CallOption, +) (Operation, error) { + if a.insertErr != nil { + return nil, a.insertErr + } + return &stubOperation{ + &computepb.Operation{ + Name: proto.String("name"), + Region: proto.String("region"), + }, + }, nil +} + +func (a stubAddressesAPI) Get(ctx context.Context, req *computepb.GetAddressRequest, + opts ...gax.CallOption, +) (*computepb.Address, error) { 
+ return &computepb.Address{Address: a.getAddr}, a.getErr +} + +func (a stubAddressesAPI) Delete(context.Context, *computepb.DeleteAddressRequest, + ...gax.CallOption, +) (Operation, error) { + if a.deleteErr != nil { + return nil, a.deleteErr + } + return &stubOperation{ + &computepb.Operation{ + Name: proto.String("name"), + Region: proto.String("region"), + }, + }, nil +} + +func (a stubAddressesAPI) Close() error { + return nil +} diff --git a/cli/internal/gcp/client/client.go b/cli/internal/gcp/client/client.go index 35d9af77d..ad9691ac6 100644 --- a/cli/internal/gcp/client/client.go +++ b/cli/internal/gcp/client/client.go @@ -36,6 +36,7 @@ type Client struct { instanceGroupManagersAPI iamAPI projectsAPI + addressesAPI workers cloudtypes.Instances controlPlanes cloudtypes.Instances @@ -56,9 +57,9 @@ type Client struct { serviceAccount string // loadbalancer - healthCheck string - backendService string - forwardingRule string + loadbalancerIP string + loadbalancerIPname string + loadbalancers []*loadBalancer } // NewFromDefault creates an uninitialized client. 
@@ -152,6 +153,13 @@ func NewFromDefault(ctx context.Context) (*Client, error) { _ = closeAll(closers) return nil, err } + closers = append(closers, projectsAPI) + addressesAPI, err := compute.NewAddressesRESTClient(ctx) + if err != nil { + _ = closeAll(closers) + return nil, err + } + return &Client{ instanceAPI: &instanceClient{insAPI}, operationRegionAPI: opRegionAPI, @@ -167,6 +175,7 @@ func NewFromDefault(ctx context.Context) (*Client, error) { instanceGroupManagersAPI: &instanceGroupManagersClient{groupAPI}, iamAPI: &iamClient{iamAPI}, projectsAPI: &projectsClient{projectsAPI}, + addressesAPI: &addressesClient{addressesAPI}, workers: make(cloudtypes.Instances), controlPlanes: make(cloudtypes.Instances), }, nil @@ -221,6 +230,7 @@ func (c *Client) Close() error { c.instanceGroupManagersAPI, c.iamAPI, c.projectsAPI, + c.addressesAPI, } return closeAll(closers) } @@ -242,11 +252,11 @@ func (c *Client) init(project, zone, region, name string) error { // GetState returns the state of the client as ConstellationState. func (c *Client) GetState() state.ConstellationState { - return state.ConstellationState{ + stat := state.ConstellationState{ Name: c.name, UID: c.uid, CloudProvider: cloudprovider.GCP.String(), - BootstrapperHost: c.controlPlanes.PublicIPs()[0], + LoadBalancerIP: c.loadbalancerIP, GCPProject: c.project, GCPZone: c.zone, GCPRegion: c.region, @@ -259,11 +269,13 @@ func (c *Client) GetState() state.ConstellationState { GCPFirewalls: c.firewalls, GCPNetwork: c.network, GCPSubnetwork: c.subnetwork, - GCPHealthCheck: c.healthCheck, - GCPBackendService: c.backendService, - GCPForwardingRule: c.forwardingRule, GCPServiceAccount: c.serviceAccount, + GCPLoadbalancerIPname: c.loadbalancerIPname, } + for _, lb := range c.loadbalancers { + stat.GCPLoadbalancers = append(stat.GCPLoadbalancers, lb.name) + } + return stat } // SetState sets the state of the client to the handed ConstellationState. 
@@ -282,10 +294,18 @@ func (c *Client) SetState(stat state.ConstellationState) { c.subnetwork = stat.GCPSubnetwork c.workerTemplate = stat.GCPWorkerInstanceTemplate c.controlPlaneTemplate = stat.GCPControlPlaneInstanceTemplate - c.healthCheck = stat.GCPHealthCheck - c.backendService = stat.GCPBackendService - c.forwardingRule = stat.GCPForwardingRule + c.loadbalancerIPname = stat.GCPLoadbalancerIPname + c.loadbalancerIP = stat.LoadBalancerIP c.serviceAccount = stat.GCPServiceAccount + for _, lbName := range stat.GCPLoadbalancers { + lb := &loadBalancer{ + name: lbName, + hasForwardingRules: true, + hasBackendService: true, + hasHealthCheck: true, + } + c.loadbalancers = append(c.loadbalancers, lb) + } } func (c *Client) generateUID() (string, error) { diff --git a/cli/internal/gcp/client/client_test.go b/cli/internal/gcp/client/client_test.go index 3d13819ac..a0f557260 100644 --- a/cli/internal/gcp/client/client_test.go +++ b/cli/internal/gcp/client/client_test.go @@ -2,6 +2,7 @@ package client import ( "errors" + "net/http" "testing" "github.com/edgelesssys/constellation/internal/cloud/cloudprovider" @@ -10,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/goleak" + "google.golang.org/api/googleapi" ) func TestMain(m *testing.M) { @@ -41,16 +43,13 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", - BootstrapperHost: "ip3", + LoadBalancerIP: "ip5", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", "fw-2"}, GCPWorkerInstanceTemplate: "temp-id", GCPControlPlaneInstanceTemplate: "temp-id", - GCPServiceAccount: "service-account", - GCPBackendService: "backend-service-id", - GCPHealthCheck: "health-check-id", - GCPForwardingRule: "forwarding-rule-id", + GCPLoadbalancers: []string{"lb-1", "lb-2"}, } t.Run("SetState", func(t *testing.T) { @@ -71,6 +70,13 @@ func TestSetGetState(t *testing.T) { assert.Equal(state.GCPControlPlaneInstanceTemplate, 
client.controlPlaneTemplate) assert.Equal(state.GCPWorkerInstanceTemplate, client.workerTemplate) assert.Equal(state.GCPServiceAccount, client.serviceAccount) + assert.Equal(state.LoadBalancerIP, client.loadbalancerIP) + for _, lb := range client.loadbalancers { + assert.Contains(state.GCPLoadbalancers, lb.name) + assert.True(lb.hasBackendService) + assert.True(lb.hasHealthCheck) + assert.True(lb.hasForwardingRules) + } }) t.Run("GetState", func(t *testing.T) { @@ -92,9 +98,11 @@ func TestSetGetState(t *testing.T) { workerTemplate: state.GCPWorkerInstanceTemplate, controlPlaneTemplate: state.GCPControlPlaneInstanceTemplate, serviceAccount: state.GCPServiceAccount, - healthCheck: state.GCPHealthCheck, - backendService: state.GCPBackendService, - forwardingRule: state.GCPForwardingRule, + loadbalancerIP: state.LoadBalancerIP, + loadbalancerIPname: state.GCPLoadbalancerIPname, + } + for _, lbName := range state.GCPLoadbalancers { + client.loadbalancers = append(client.loadbalancers, &loadBalancer{name: lbName}) } stat := client.GetState() @@ -141,3 +149,21 @@ func (c *someCloser) Close() error { c.closed = true return c.closeErr } + +func TestIsNotFoundError(t *testing.T) { + testCases := map[string]struct { + err error + result bool + }{ + "not found error": {err: &googleapi.Error{Code: http.StatusNotFound}, result: true}, + "nil error": {err: nil, result: false}, + "other error": {err: errors.New("failed"), result: false}, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + assert := assert.New(t) + assert.Equal(tc.result, isNotFoundError(tc.err)) + }) + } +} diff --git a/cli/internal/gcp/client/gcpwrappers.go b/cli/internal/gcp/client/gcpwrappers.go index c9337f9fa..35f0c4355 100644 --- a/cli/internal/gcp/client/gcpwrappers.go +++ b/cli/internal/gcp/client/gcpwrappers.go @@ -212,15 +212,21 @@ func (c *iamClient) Close() error { return c.IamClient.Close() } -func (c *iamClient) CreateServiceAccount(ctx context.Context, req 
*adminpb.CreateServiceAccountRequest, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) { +func (c *iamClient) CreateServiceAccount(ctx context.Context, req *adminpb.CreateServiceAccountRequest, + opts ...gax.CallOption, +) (*adminpb.ServiceAccount, error) { return c.IamClient.CreateServiceAccount(ctx, req) } -func (c *iamClient) CreateServiceAccountKey(ctx context.Context, req *adminpb.CreateServiceAccountKeyRequest, opts ...gax.CallOption) (*adminpb.ServiceAccountKey, error) { +func (c *iamClient) CreateServiceAccountKey(ctx context.Context, req *adminpb.CreateServiceAccountKeyRequest, + opts ...gax.CallOption, +) (*adminpb.ServiceAccountKey, error) { return c.IamClient.CreateServiceAccountKey(ctx, req) } -func (c *iamClient) DeleteServiceAccount(ctx context.Context, req *adminpb.DeleteServiceAccountRequest, opts ...gax.CallOption) error { +func (c *iamClient) DeleteServiceAccount(ctx context.Context, req *adminpb.DeleteServiceAccountRequest, + opts ...gax.CallOption, +) error { return c.IamClient.DeleteServiceAccount(ctx, req) } @@ -232,10 +238,34 @@ func (c *projectsClient) Close() error { return c.ProjectsClient.Close() } -func (c *projectsClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { +func (c *projectsClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, + opts ...gax.CallOption, +) (*iampb.Policy, error) { return c.ProjectsClient.GetIamPolicy(ctx, req) } -func (c *projectsClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { +func (c *projectsClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, + opts ...gax.CallOption, +) (*iampb.Policy, error) { return c.ProjectsClient.SetIamPolicy(ctx, req) } + +type addressesClient struct { + *compute.AddressesClient +} + +func (c *addressesClient) Insert(ctx context.Context, req *computepb.InsertAddressRequest, + opts 
...gax.CallOption, +) (Operation, error) { + return c.AddressesClient.Insert(ctx, req) +} + +func (c *addressesClient) Delete(ctx context.Context, req *computepb.DeleteAddressRequest, + opts ...gax.CallOption, +) (Operation, error) { + return c.AddressesClient.Delete(ctx, req) +} + +func (c *addressesClient) Close() error { + return c.AddressesClient.Close() +} diff --git a/cli/internal/gcp/client/instances.go b/cli/internal/gcp/client/instances.go index fe05b1d90..0bc50d1bd 100644 --- a/cli/internal/gcp/client/instances.go +++ b/cli/internal/gcp/client/instances.go @@ -9,6 +9,7 @@ import ( "github.com/edgelesssys/constellation/bootstrapper/role" "github.com/edgelesssys/constellation/internal/cloud/cloudtypes" + "github.com/edgelesssys/constellation/internal/constants" "google.golang.org/api/iterator" computepb "google.golang.org/genproto/googleapis/cloud/compute/v1" "google.golang.org/protobuf/proto" @@ -76,8 +77,14 @@ func (c *Client) CreateInstances(ctx context.Context, input CreateInstancesInput ops = []Operation{} controlPlaneGroupInput := instanceGroupManagerInput{ - Count: input.CountControlPlanes, - Name: strings.Join([]string{c.name, "control-plane", c.uid}, "-"), + Count: input.CountControlPlanes, + Name: strings.Join([]string{c.name, "control-plane", c.uid}, "-"), + NamedPorts: []*computepb.NamedPort{ + {Name: proto.String("kubernetes"), Port: proto.Int32(constants.KubernetesPort)}, + {Name: proto.String("debugd"), Port: proto.Int32(constants.DebugdPort)}, + {Name: proto.String("bootstrapper"), Port: proto.Int32(constants.BootstrapperPort)}, + {Name: proto.String("verify"), Port: proto.Int32(constants.VerifyServiceNodePortGRPC)}, + }, Template: c.controlPlaneTemplate, UID: c.uid, Project: c.project, @@ -277,18 +284,20 @@ func (c *Client) getInstanceIPs(ctx context.Context, groupID string, list cloudt } type instanceGroupManagerInput struct { - Count int - Name string - Template string - Project string - Zone string - UID string + Count int + Name string 
+ NamedPorts []*computepb.NamedPort + Template string + Project string + Zone string + UID string } func (i *instanceGroupManagerInput) InsertInstanceGroupManagerRequest() computepb.InsertInstanceGroupManagerRequest { return computepb.InsertInstanceGroupManagerRequest{ InstanceGroupManagerResource: &computepb.InstanceGroupManager{ BaseInstanceName: proto.String(i.Name), + NamedPorts: i.NamedPorts, InstanceTemplate: proto.String("projects/" + i.Project + "/global/instanceTemplates/" + i.Template), Name: proto.String(i.Name), TargetSize: proto.Int32(int32(i.Count)), diff --git a/cli/internal/gcp/client/loadbalancer.go b/cli/internal/gcp/client/loadbalancer.go new file mode 100644 index 000000000..8077fdfe7 --- /dev/null +++ b/cli/internal/gcp/client/loadbalancer.go @@ -0,0 +1,426 @@ +package client + +import ( + "context" + "errors" + "fmt" + "strconv" + + "github.com/edgelesssys/constellation/internal/constants" + "go.uber.org/multierr" + computepb "google.golang.org/genproto/googleapis/cloud/compute/v1" + "google.golang.org/protobuf/proto" +) + +type loadBalancer struct { + name string + + // For creation. + ip string + frontendPort int + backendPortName string + healthCheck computepb.HealthCheck_Type + label bool + + // For resource management. + hasHealthCheck bool + hasBackendService bool + hasForwardingRules bool +} + +// CreateLoadBalancers creates all necessary load balancers. +func (c *Client) CreateLoadBalancers(ctx context.Context) error { + if err := c.createIPAddr(ctx); err != nil { + return fmt.Errorf("creating load balancer IP address: %w", err) + } + + // + // LoadBalancer definitions. + // + + c.loadbalancers = append(c.loadbalancers, &loadBalancer{ + name: c.name + "-" + "kube" + "-" + c.uid, + ip: c.loadbalancerIP, + frontendPort: constants.KubernetesPort, + backendPortName: "kubernetes", + healthCheck: computepb.HealthCheck_HTTPS, + label: true, // Label, so bootstrapper can find kube-apiserver. 
+ }) + + c.loadbalancers = append(c.loadbalancers, &loadBalancer{ + name: c.name + "-" + "boot" + "-" + c.uid, + ip: c.loadbalancerIPname, + frontendPort: constants.BootstrapperPort, + backendPortName: "bootstrapper", + healthCheck: computepb.HealthCheck_TCP, + }) + + c.loadbalancers = append(c.loadbalancers, &loadBalancer{ + name: c.name + "-" + "verify" + "-" + c.uid, + ip: c.loadbalancerIPname, + frontendPort: constants.VerifyServiceNodePortGRPC, + backendPortName: "verify", + healthCheck: computepb.HealthCheck_TCP, + }) + + c.loadbalancers = append(c.loadbalancers, &loadBalancer{ + name: c.name + "-" + "debugd" + "-" + c.uid, + ip: c.loadbalancerIPname, + frontendPort: constants.DebugdPort, + backendPortName: "debugd", + healthCheck: computepb.HealthCheck_TCP, + }) + + // Load balancer creation. + + errC := make(chan error) + createLB := func(ctx context.Context, lb *loadBalancer) { + errC <- c.createLoadBalancer(ctx, lb) + } + + for _, lb := range c.loadbalancers { + go createLB(ctx, lb) + } + + var err error + for i := 0; i < len(c.loadbalancers); i++ { + err = multierr.Append(err, <-errC) + } + + return err +} + +// createLoadBalancer creates a load balancer. 
+func (c *Client) createLoadBalancer(ctx context.Context, lb *loadBalancer) error { + if err := c.createHealthCheck(ctx, lb); err != nil { + return fmt.Errorf("creating health checks: %w", err) + } + if err := c.createBackendService(ctx, lb); err != nil { + return fmt.Errorf("creating backend services: %w", err) + } + if err := c.createForwardingRules(ctx, lb); err != nil { + return fmt.Errorf("creating forwarding rules: %w", err) + } + return nil +} + +func (c *Client) createHealthCheck(ctx context.Context, lb *loadBalancer) error { + req := &computepb.InsertRegionHealthCheckRequest{ + Project: c.project, + Region: c.region, + HealthCheckResource: &computepb.HealthCheck{ + Name: proto.String(lb.name), + Type: proto.String(computepb.HealthCheck_Type_name[int32(lb.healthCheck)]), + CheckIntervalSec: proto.Int32(1), + TimeoutSec: proto.Int32(1), + }, + } + switch lb.healthCheck { + case computepb.HealthCheck_HTTPS: + req.HealthCheckResource.HttpsHealthCheck = newHealthCheckHTTPS(lb.frontendPort) + case computepb.HealthCheck_TCP: + req.HealthCheckResource.TcpHealthCheck = newHealthCheckTCP(lb.frontendPort) + } + resp, err := c.healthChecksAPI.Insert(ctx, req) + if err != nil { + return fmt.Errorf("inserting health check: %w", err) + } + + if err := c.waitForOperations(ctx, []Operation{resp}); err != nil { + return fmt.Errorf("waiting for health check creation: %w", err) + } + + lb.hasHealthCheck = true + return nil +} + +func (c *Client) createBackendService(ctx context.Context, lb *loadBalancer) error { + req := &computepb.InsertRegionBackendServiceRequest{ + Project: c.project, + Region: c.region, + BackendServiceResource: &computepb.BackendService{ + Name: proto.String(lb.name), + Protocol: proto.String(computepb.BackendService_Protocol_name[int32(computepb.BackendService_TCP)]), + LoadBalancingScheme: proto.String(computepb.BackendService_LoadBalancingScheme_name[int32(computepb.BackendService_EXTERNAL)]), + HealthChecks: 
[]string{"https://www.googleapis.com/compute/v1/projects/" + c.project + "/regions/" + c.region + "/healthChecks/" + lb.name}, + PortName: proto.String(lb.backendPortName), + Backends: []*computepb.Backend{ + { + BalancingMode: proto.String(computepb.Backend_BalancingMode_name[int32(computepb.Backend_CONNECTION)]), + Group: proto.String("https://www.googleapis.com/compute/v1/projects/" + c.project + "/zones/" + c.zone + "/instanceGroups/" + c.controlPlaneInstanceGroup), + }, + }, + }, + } + resp, err := c.backendServicesAPI.Insert(ctx, req) + if err != nil { + return fmt.Errorf("inserting backend services: %w", err) + } + + if err := c.waitForOperations(ctx, []Operation{resp}); err != nil { + return fmt.Errorf("waiting for backend services creation: %w", err) + } + + lb.hasBackendService = true + return nil +} + +func (c *Client) createForwardingRules(ctx context.Context, lb *loadBalancer) error { + req := &computepb.InsertForwardingRuleRequest{ + Project: c.project, + Region: c.region, + ForwardingRuleResource: &computepb.ForwardingRule{ + Name: proto.String(lb.name), + IPAddress: proto.String("https://www.googleapis.com/compute/v1/projects/" + c.project + "/regions/" + c.region + "/addresses/" + c.loadbalancerIPname), + IPProtocol: proto.String(computepb.ForwardingRule_IPProtocolEnum_name[int32(computepb.ForwardingRule_TCP)]), + LoadBalancingScheme: proto.String(computepb.ForwardingRule_LoadBalancingScheme_name[int32(computepb.ForwardingRule_EXTERNAL)]), + Ports: []string{strconv.Itoa(lb.frontendPort)}, + BackendService: proto.String("https://www.googleapis.com/compute/v1/projects/" + c.project + "/regions/" + c.region + "/backendServices/" + lb.name), + }, + } + resp, err := c.forwardingRulesAPI.Insert(ctx, req) + if err != nil { + return fmt.Errorf("inserting forwarding rules: %w", err) + } + + if err := c.waitForOperations(ctx, []Operation{resp}); err != nil { + return err + } + lb.hasForwardingRules = true + + if lb.label { + return c.labelLoadBalancer(ctx, 
lb.name) + } + return nil +} + +// labelLoadBalancer labels a load balancer (its forwarding rules) so that it can be found by applications in the cluster. +func (c *Client) labelLoadBalancer(ctx context.Context, name string) error { + forwardingRule, err := c.forwardingRulesAPI.Get(ctx, &computepb.GetForwardingRuleRequest{ + Project: c.project, + Region: c.region, + ForwardingRule: name, + }) + if err != nil { + return fmt.Errorf("getting forwarding rule: %w", err) + } + if forwardingRule.LabelFingerprint == nil { + return fmt.Errorf("forwarding rule %s has no label fingerprint", name) + } + + resp, err := c.forwardingRulesAPI.SetLabels(ctx, &computepb.SetLabelsForwardingRuleRequest{ + Project: c.project, + Region: c.region, + Resource: name, + RegionSetLabelsRequestResource: &computepb.RegionSetLabelsRequest{ + Labels: map[string]string{"constellation-uid": c.uid}, + LabelFingerprint: forwardingRule.LabelFingerprint, + }, + }) + if err != nil { + return fmt.Errorf("setting forwarding rule labels: %w", err) + } + + return c.waitForOperations(ctx, []Operation{resp}) +} + +// TerminateLoadBalancers terminates all load balancers. +func (c *Client) TerminateLoadBalancers(ctx context.Context) error { + errC := make(chan error) + + terminateLB := func(ctx context.Context, lb *loadBalancer) { + errC <- c.terminateLoadBalancer(ctx, lb) + } + + for _, lb := range c.loadbalancers { + go terminateLB(ctx, lb) + } + + var err error + for i := 0; i < len(c.loadbalancers); i++ { + err = multierr.Append(err, <-errC) + } + if err != nil && !isNotFoundError(err) { + return err + } + + if err := c.deleteIPAddr(ctx); err != nil { + return fmt.Errorf("deleting load balancer IP address: %w", err) + } + + c.loadbalancers = nil + return nil +} + +// terminateLoadBalancer removes the load balancer and its associated resources. 
+func (c *Client) terminateLoadBalancer(ctx context.Context, lb *loadBalancer) error { + if lb == nil { + return nil + } + if lb.name == "" { + return errors.New("load balancer name is empty") + } + + if lb.hasForwardingRules { + if err := c.terminateForwadingRules(ctx, lb); err != nil { + return fmt.Errorf("terminating forwarding rules: %w", err) + } + } + + if lb.hasBackendService { + if err := c.terminateBackendService(ctx, lb); err != nil { + return fmt.Errorf("terminating backend services: %w", err) + } + } + + if lb.hasHealthCheck { + if err := c.terminateHealthCheck(ctx, lb); err != nil { + return fmt.Errorf("terminating health checks: %w", err) + } + } + + lb.name = "" + return nil +} + +func (c *Client) terminateForwadingRules(ctx context.Context, lb *loadBalancer) error { + resp, err := c.forwardingRulesAPI.Delete(ctx, &computepb.DeleteForwardingRuleRequest{ + Project: c.project, + Region: c.region, + ForwardingRule: lb.name, + }) + if isNotFoundError(err) { + lb.hasForwardingRules = false + return nil + } + if err != nil { + return fmt.Errorf("deleting forwarding rules: %w", err) + } + + if err := c.waitForOperations(ctx, []Operation{resp}); err != nil { + return err + } + + lb.hasForwardingRules = false + return nil +} + +func (c *Client) terminateBackendService(ctx context.Context, lb *loadBalancer) error { + resp, err := c.backendServicesAPI.Delete(ctx, &computepb.DeleteRegionBackendServiceRequest{ + Project: c.project, + Region: c.region, + BackendService: lb.name, + }) + if isNotFoundError(err) { + lb.hasBackendService = false + return nil + } + if err != nil { + return fmt.Errorf("deleting backend services: %w", err) + } + + if err := c.waitForOperations(ctx, []Operation{resp}); err != nil { + return err + } + + lb.hasBackendService = false + return nil +} + +func (c *Client) terminateHealthCheck(ctx context.Context, lb *loadBalancer) error { + resp, err := c.healthChecksAPI.Delete(ctx, &computepb.DeleteRegionHealthCheckRequest{ + Project: 
c.project, + Region: c.region, + HealthCheck: lb.name, + }) + if isNotFoundError(err) { + lb.hasHealthCheck = false + return nil + } + if err != nil { + return fmt.Errorf("deleting health checks: %w", err) + } + + if err := c.waitForOperations(ctx, []Operation{resp}); err != nil { + return err + } + + lb.hasHealthCheck = false + return nil +} + +func (c *Client) createIPAddr(ctx context.Context) error { + ipName := c.name + "-" + c.uid + insertReq := &computepb.InsertAddressRequest{ + Project: c.project, + Region: c.region, + AddressResource: &computepb.Address{ + Name: proto.String(ipName), + }, + } + op, err := c.addressesAPI.Insert(ctx, insertReq) + if err != nil { + return fmt.Errorf("inserting address: %w", err) + } + if err := c.waitForOperations(ctx, []Operation{op}); err != nil { + return err + } + c.loadbalancerIPname = c.name + "-" + c.uid + + getReq := &computepb.GetAddressRequest{ + Project: c.project, + Region: c.region, + Address: c.loadbalancerIPname, + } + addr, err := c.addressesAPI.Get(ctx, getReq) + if err != nil { + return fmt.Errorf("getting address: %w", err) + } + if addr.Address == nil { + return fmt.Errorf("address response without address: %q", addr) + } + + c.loadbalancerIP = *addr.Address + return nil +} + +func (c *Client) deleteIPAddr(ctx context.Context) error { + if c.loadbalancerIPname == "" { + return nil + } + + req := &computepb.DeleteAddressRequest{ + Project: c.project, + Region: c.region, + Address: c.loadbalancerIPname, + } + op, err := c.addressesAPI.Delete(ctx, req) + if isNotFoundError(err) { + c.loadbalancerIPname = "" + return nil + } + if err != nil { + return fmt.Errorf("deleting address: %w", err) + } + + if err := c.waitForOperations(ctx, []Operation{op}); err != nil { + return err + } + + c.loadbalancerIPname = "" + return nil +} + +func newHealthCheckHTTPS(port int) *computepb.HTTPSHealthCheck { + return &computepb.HTTPSHealthCheck{ + Host: proto.String(""), + Port: proto.Int32(int32(port)), + RequestPath: 
proto.String("/readyz"), + } +} + +func newHealthCheckTCP(port int) *computepb.TCPHealthCheck { + return &computepb.TCPHealthCheck{ + Port: proto.Int32(int32(port)), + } +} diff --git a/cli/internal/gcp/client/loadbalancer_test.go b/cli/internal/gcp/client/loadbalancer_test.go new file mode 100644 index 000000000..94c5e24e4 --- /dev/null +++ b/cli/internal/gcp/client/loadbalancer_test.go @@ -0,0 +1,628 @@ +package client + +import ( + "context" + "errors" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "google.golang.org/api/googleapi" + "google.golang.org/genproto/googleapis/cloud/compute/v1" + "google.golang.org/protobuf/proto" +) + +func TestCreateLoadBalancers(t *testing.T) { + someErr := errors.New("failed") + forwardingRule := &compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")} + + testCases := map[string]struct { + addrAPI addressesAPI + healthAPI healthChecksAPI + backendAPI backendServicesAPI + forwardAPI forwardingRulesAPI + opRegAPI operationRegionAPI + wantErr bool + }{ + "successful create": { + addrAPI: &stubAddressesAPI{getAddr: proto.String("192.0.2.1")}, + healthAPI: &stubHealthChecksAPI{}, + backendAPI: &stubBackendServicesAPI{}, + forwardAPI: &stubForwardingRulesAPI{forwardingRule: forwardingRule}, + opRegAPI: stubOperationRegionAPI{}, + }, + "createIPAddr fails": { + addrAPI: &stubAddressesAPI{insertErr: someErr}, + healthAPI: &stubHealthChecksAPI{}, + backendAPI: &stubBackendServicesAPI{}, + forwardAPI: &stubForwardingRulesAPI{forwardingRule: forwardingRule}, + opRegAPI: stubOperationRegionAPI{}, + wantErr: true, + }, + "createLB fails": { + addrAPI: &stubAddressesAPI{}, + healthAPI: &stubHealthChecksAPI{}, + backendAPI: &stubBackendServicesAPI{insertErr: someErr}, + forwardAPI: &stubForwardingRulesAPI{forwardingRule: forwardingRule}, + opRegAPI: stubOperationRegionAPI{}, + wantErr: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + assert := assert.New(t) + + ctx := 
context.Background() + client := Client{ + project: "project", + zone: "zone", + name: "name", + uid: "uid", + addressesAPI: tc.addrAPI, + healthChecksAPI: tc.healthAPI, + backendServicesAPI: tc.backendAPI, + forwardingRulesAPI: tc.forwardAPI, + operationRegionAPI: tc.opRegAPI, + } + + err := client.CreateLoadBalancers(ctx) + + if tc.wantErr { + assert.Error(err) + } else { + assert.NoError(err) + assert.NotEmpty(client.loadbalancerIPname) + assert.Equal(4, len(client.loadbalancers)) + } + }) + } +} + +func TestCreateLoadBalancer(t *testing.T) { + someErr := errors.New("failed") + testCases := map[string]struct { + operationRegionAPI operationRegionAPI + healthChecksAPI healthChecksAPI + backendServicesAPI backendServicesAPI + forwardingRulesAPI forwardingRulesAPI + wantErr bool + wantLB *loadBalancer + }{ + "successful create": { + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")}}, + operationRegionAPI: stubOperationRegionAPI{}, + wantLB: &loadBalancer{ + name: "name", + frontendPort: 1234, + backendPortName: "testport", + hasHealthCheck: true, + hasBackendService: true, + hasForwardingRules: true, + }, + }, + "successful create with label": { + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")}}, + operationRegionAPI: stubOperationRegionAPI{}, + wantLB: &loadBalancer{ + name: "name", + frontendPort: 1234, + backendPortName: "testport", + label: true, + hasHealthCheck: true, + hasBackendService: true, + hasForwardingRules: true, + }, + }, + "CreateLoadBalancer fails when getting forwarding rule": { + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: 
stubForwardingRulesAPI{getErr: someErr}, + operationRegionAPI: stubOperationRegionAPI{}, + wantErr: true, + wantLB: &loadBalancer{ + name: "name", + frontendPort: 1234, + backendPortName: "testport", + label: true, + hasHealthCheck: true, + hasBackendService: true, + hasForwardingRules: true, + }, + }, + "CreateLoadBalancer fails when label fingerprint is missing": { + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{}}, + operationRegionAPI: stubOperationRegionAPI{}, + wantErr: true, + wantLB: &loadBalancer{ + name: "name", + frontendPort: 1234, + backendPortName: "testport", + label: true, + hasHealthCheck: true, + hasBackendService: true, + hasForwardingRules: true, + }, + }, + "CreateLoadBalancer fails when creating health check": { + healthChecksAPI: stubHealthChecksAPI{insertErr: someErr}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")}}, + operationRegionAPI: stubOperationRegionAPI{}, + wantErr: true, + wantLB: &loadBalancer{ + name: "name", + frontendPort: 1234, + backendPortName: "testport", + hasHealthCheck: false, + hasBackendService: false, + hasForwardingRules: false, + }, + }, + "CreateLoadBalancer fails when creating backend service": { + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{insertErr: someErr}, + forwardingRulesAPI: stubForwardingRulesAPI{}, + operationRegionAPI: stubOperationRegionAPI{}, + wantErr: true, + wantLB: &loadBalancer{ + name: "name", + frontendPort: 1234, + backendPortName: "testport", + hasHealthCheck: true, + hasBackendService: false, + hasForwardingRules: false, + }, + }, + "CreateLoadBalancer fails when creating forwarding rule": { + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + 
forwardingRulesAPI: stubForwardingRulesAPI{insertErr: someErr}, + operationRegionAPI: stubOperationRegionAPI{}, + wantErr: true, + wantLB: &loadBalancer{ + name: "name", + frontendPort: 1234, + backendPortName: "testport", + hasHealthCheck: true, + hasBackendService: true, + hasForwardingRules: false, + }, + }, + "CreateLoadBalancer fails when waiting on operation": { + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")}}, + operationRegionAPI: stubOperationRegionAPI{waitErr: someErr}, + wantErr: true, + wantLB: &loadBalancer{ + name: "name", + frontendPort: 1234, + backendPortName: "testport", + hasHealthCheck: false, + hasBackendService: false, + hasForwardingRules: false, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + assert := assert.New(t) + + ctx := context.Background() + client := Client{ + project: "project", + zone: "zone", + name: "name", + uid: "uid", + backendServicesAPI: tc.backendServicesAPI, + forwardingRulesAPI: tc.forwardingRulesAPI, + healthChecksAPI: tc.healthChecksAPI, + operationRegionAPI: tc.operationRegionAPI, + } + lb := &loadBalancer{ + name: tc.wantLB.name, + frontendPort: tc.wantLB.frontendPort, + backendPortName: tc.wantLB.backendPortName, + label: tc.wantLB.label, + } + + err := client.createLoadBalancer(ctx, lb) + + if tc.wantErr { + assert.Error(err) + assert.Equal(tc.wantLB, lb) + } else { + assert.NoError(err) + assert.Equal(tc.wantLB, lb) + } + }) + } +} + +func TestTerminateLoadbalancers(t *testing.T) { + someErr := errors.New("failed") + newRunningLB := func() *loadBalancer { + return &loadBalancer{ + name: "name", + hasHealthCheck: true, + hasBackendService: true, + hasForwardingRules: true, + } + } + + testCases := map[string]struct { + addrAPI addressesAPI + healthAPI healthChecksAPI + backendAPI backendServicesAPI + 
forwardAPI forwardingRulesAPI + opRegionAPI operationRegionAPI + wantErr bool + }{ + "successful terminate": { + addrAPI: &stubAddressesAPI{}, + healthAPI: &stubHealthChecksAPI{}, + backendAPI: &stubBackendServicesAPI{}, + forwardAPI: &stubForwardingRulesAPI{}, + opRegionAPI: stubOperationRegionAPI{}, + }, + "deleteIPAddr fails": { + addrAPI: &stubAddressesAPI{deleteErr: someErr}, + healthAPI: &stubHealthChecksAPI{}, + backendAPI: &stubBackendServicesAPI{}, + forwardAPI: &stubForwardingRulesAPI{}, + opRegionAPI: stubOperationRegionAPI{}, + wantErr: true, + }, + "deleteLB fails": { + addrAPI: &stubAddressesAPI{}, + healthAPI: &stubHealthChecksAPI{}, + backendAPI: &stubBackendServicesAPI{deleteErr: someErr}, + forwardAPI: &stubForwardingRulesAPI{}, + opRegionAPI: stubOperationRegionAPI{}, + wantErr: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + assert := assert.New(t) + + ctx := context.Background() + client := Client{ + project: "project", + zone: "zone", + name: "name", + uid: "uid", + addressesAPI: tc.addrAPI, + healthChecksAPI: tc.healthAPI, + backendServicesAPI: tc.backendAPI, + forwardingRulesAPI: tc.forwardAPI, + operationRegionAPI: tc.opRegionAPI, + loadbalancerIPname: "loadbalancerIPid", + loadbalancers: []*loadBalancer{ + newRunningLB(), + newRunningLB(), + newRunningLB(), + }, + } + + err := client.TerminateLoadBalancers(ctx) + + if tc.wantErr { + assert.Error(err) + } else { + assert.NoError(err) + assert.Empty(client.loadbalancerIPname) + assert.Nil(client.loadbalancers) + } + }) + } +} + +func TestTerminateLoadBalancer(t *testing.T) { + someErr := errors.New("failed") + notFoundErr := &googleapi.Error{Code: http.StatusNotFound} + newRunningLB := func() *loadBalancer { + return &loadBalancer{ + name: "name", + hasHealthCheck: true, + hasBackendService: true, + hasForwardingRules: true, + } + } + + testCases := map[string]struct { + lb *loadBalancer + opRegionAPI operationRegionAPI + healthChecksAPI 
healthChecksAPI + backendServicesAPI backendServicesAPI + forwardingRulesAPI forwardingRulesAPI + wantErr bool + wantLB *loadBalancer + }{ + "successful terminate": { + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{}, + opRegionAPI: stubOperationRegionAPI{}, + wantLB: &loadBalancer{}, + }, + "terminate partially created loadbalancer": { + lb: &loadBalancer{ + name: "name", + hasHealthCheck: true, + hasBackendService: true, + hasForwardingRules: false, + }, + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{deleteErr: someErr}, + opRegionAPI: stubOperationRegionAPI{}, + wantLB: &loadBalancer{}, + }, + "terminate partially created loadbalancer 2": { + lb: &loadBalancer{ + name: "name", + hasHealthCheck: true, + hasBackendService: false, + hasForwardingRules: false, + }, + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{deleteErr: someErr}, + forwardingRulesAPI: stubForwardingRulesAPI{deleteErr: someErr}, + opRegionAPI: stubOperationRegionAPI{}, + wantLB: &loadBalancer{}, + }, + "no-op for nil loadbalancer": { + lb: nil, + }, + "health check not found": { + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{deleteErr: notFoundErr}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{}, + opRegionAPI: stubOperationRegionAPI{}, + wantLB: &loadBalancer{}, + }, + "backend service not found": { + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{deleteErr: notFoundErr}, + forwardingRulesAPI: stubForwardingRulesAPI{}, + opRegionAPI: stubOperationRegionAPI{}, + wantLB: &loadBalancer{}, + }, + "forwarding rules not found": { + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + 
forwardingRulesAPI: stubForwardingRulesAPI{deleteErr: notFoundErr}, + opRegionAPI: stubOperationRegionAPI{}, + wantLB: &loadBalancer{}, + }, + "fails for loadbalancer without name": { + lb: &loadBalancer{}, + wantErr: true, + wantLB: &loadBalancer{}, + }, + "fails when deleting health check": { + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{deleteErr: someErr}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{}, + opRegionAPI: stubOperationRegionAPI{}, + wantErr: true, + wantLB: &loadBalancer{ + name: "name", + hasHealthCheck: true, + hasBackendService: false, + hasForwardingRules: false, + }, + }, + "fails when deleting backend service": { + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{deleteErr: someErr}, + forwardingRulesAPI: stubForwardingRulesAPI{}, + opRegionAPI: stubOperationRegionAPI{}, + wantErr: true, + wantLB: &loadBalancer{ + name: "name", + hasHealthCheck: true, + hasBackendService: true, + hasForwardingRules: false, + }, + }, + "fails when deleting forwarding rule": { + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{deleteErr: someErr}, + opRegionAPI: stubOperationRegionAPI{}, + wantErr: true, + wantLB: &loadBalancer{ + name: "name", + hasHealthCheck: true, + hasBackendService: true, + hasForwardingRules: true, + }, + }, + "fails when waiting on operation": { + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{}, + opRegionAPI: stubOperationRegionAPI{waitErr: someErr}, + wantErr: true, + wantLB: &loadBalancer{ + name: "name", + hasHealthCheck: true, + hasBackendService: true, + hasForwardingRules: true, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + assert := assert.New(t) + + ctx := 
context.Background() + client := Client{ + project: "project", + zone: "zone", + name: "name", + uid: "uid", + backendServicesAPI: tc.backendServicesAPI, + forwardingRulesAPI: tc.forwardingRulesAPI, + healthChecksAPI: tc.healthChecksAPI, + operationRegionAPI: tc.opRegionAPI, + } + + err := client.terminateLoadBalancer(ctx, tc.lb) + + if tc.wantErr { + assert.Error(err) + assert.Equal(tc.wantLB, tc.lb) + } else { + assert.NoError(err) + assert.Equal(tc.wantLB, tc.lb) + } + }) + } +} + +func TestCreateIPAddr(t *testing.T) { + someErr := errors.New("failed") + + testCases := map[string]struct { + addrAPI addressesAPI + opAPI operationRegionAPI + wantErr bool + }{ + "successful create": { + addrAPI: stubAddressesAPI{getAddr: proto.String("test-ip")}, + opAPI: stubOperationRegionAPI{}, + }, + "insert fails": { + addrAPI: stubAddressesAPI{insertErr: someErr}, + opAPI: stubOperationRegionAPI{}, + wantErr: true, + }, + "get fails": { + addrAPI: stubAddressesAPI{getErr: someErr}, + opAPI: stubOperationRegionAPI{}, + wantErr: true, + }, + "get address nil": { + addrAPI: stubAddressesAPI{getAddr: nil}, + opAPI: stubOperationRegionAPI{}, + wantErr: true, + }, + "wait fails": { + addrAPI: stubAddressesAPI{}, + opAPI: stubOperationRegionAPI{waitErr: someErr}, + wantErr: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + assert := assert.New(t) + + ctx := context.Background() + client := Client{ + project: "project", + zone: "zone", + name: "name", + uid: "uid", + addressesAPI: tc.addrAPI, + operationRegionAPI: tc.opAPI, + } + + err := client.createIPAddr(ctx) + + if tc.wantErr { + assert.Error(err) + } else { + assert.NoError(err) + assert.Equal("test-ip", client.loadbalancerIP) + assert.Equal("name-uid", client.loadbalancerIPname) + } + }) + } +} + +func TestDeleteIPAddr(t *testing.T) { + someErr := errors.New("failed") + notFoundErr := &googleapi.Error{Code: http.StatusNotFound} + + testCases := map[string]struct { + addrAPI addressesAPI 
+ opAPI operationRegionAPI + addrID string + wantErr bool + }{ + "successful delete": { + addrAPI: stubAddressesAPI{}, + opAPI: stubOperationRegionAPI{}, + addrID: "name", + }, + "not found": { + addrAPI: stubAddressesAPI{deleteErr: notFoundErr}, + opAPI: stubOperationRegionAPI{}, + addrID: "name", + }, + "empty is no-op": { + addrAPI: stubAddressesAPI{}, + opAPI: stubOperationRegionAPI{}, + }, + "delete fails": { + addrAPI: stubAddressesAPI{deleteErr: someErr}, + opAPI: stubOperationRegionAPI{}, + addrID: "name", + wantErr: true, + }, + "wait fails": { + addrAPI: stubAddressesAPI{}, + opAPI: stubOperationRegionAPI{waitErr: someErr}, + addrID: "name", + wantErr: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + assert := assert.New(t) + + ctx := context.Background() + client := Client{ + project: "project", + zone: "zone", + name: "name", + uid: "uid", + addressesAPI: tc.addrAPI, + operationRegionAPI: tc.opAPI, + loadbalancerIPname: tc.addrID, + } + + err := client.deleteIPAddr(ctx) + + if tc.wantErr { + assert.Error(err) + } else { + assert.NoError(err) + assert.Empty(client.loadbalancerIPname) + } + }) + } +} diff --git a/cli/internal/gcp/client/network.go b/cli/internal/gcp/client/network.go index 485dceb6b..2b6bc3d53 100644 --- a/cli/internal/gcp/client/network.go +++ b/cli/internal/gcp/client/network.go @@ -3,11 +3,8 @@ package client import ( "context" "errors" - "fmt" - "strconv" "github.com/edgelesssys/constellation/internal/cloud/cloudtypes" - "github.com/edgelesssys/constellation/internal/constants" computepb "google.golang.org/genproto/googleapis/cloud/compute/v1" "google.golang.org/protobuf/proto" ) @@ -226,148 +223,3 @@ func (c *Client) terminateSubnet(ctx context.Context) error { c.subnetwork = "" return nil } - -// CreateLoadBalancer creates a load balancer. 
-func (c *Client) CreateLoadBalancer(ctx context.Context) error { - c.healthCheck = c.name + "-" + c.uid - resp, err := c.healthChecksAPI.Insert(ctx, &computepb.InsertRegionHealthCheckRequest{ - Project: c.project, - Region: c.region, - HealthCheckResource: &computepb.HealthCheck{ - Name: proto.String(c.healthCheck), - Type: proto.String(computepb.HealthCheck_Type_name[int32(computepb.HealthCheck_HTTPS)]), - CheckIntervalSec: proto.Int32(1), - TimeoutSec: proto.Int32(1), - HttpsHealthCheck: &computepb.HTTPSHealthCheck{ - Host: proto.String(""), - Port: proto.Int32(6443), - RequestPath: proto.String("/readyz"), - }, - }, - }) - if err != nil { - return err - } - if err := c.waitForOperations(ctx, []Operation{resp}); err != nil { - return err - } - - c.backendService = c.name + "-" + c.uid - resp, err = c.backendServicesAPI.Insert(ctx, &computepb.InsertRegionBackendServiceRequest{ - Project: c.project, - Region: c.region, - BackendServiceResource: &computepb.BackendService{ - Name: proto.String(c.backendService), - Protocol: proto.String(computepb.BackendService_Protocol_name[int32(computepb.BackendService_TCP)]), - LoadBalancingScheme: proto.String(computepb.BackendService_LoadBalancingScheme_name[int32(computepb.BackendService_EXTERNAL)]), - TimeoutSec: proto.Int32(10), - HealthChecks: []string{"https://www.googleapis.com/compute/v1/projects/" + c.project + "/regions/" + c.region + "/healthChecks/" + c.healthCheck}, - Backends: []*computepb.Backend{ - { - BalancingMode: proto.String(computepb.Backend_BalancingMode_name[int32(computepb.Backend_CONNECTION)]), - Group: proto.String("https://www.googleapis.com/compute/v1/projects/" + c.project + "/zones/" + c.zone + "/instanceGroups/" + c.controlPlaneInstanceGroup), - }, - }, - }, - }) - if err != nil { - return err - } - if err := c.waitForOperations(ctx, []Operation{resp}); err != nil { - return err - } - - c.forwardingRule = c.name + "-" + c.uid - resp, err = c.forwardingRulesAPI.Insert(ctx, 
&computepb.InsertForwardingRuleRequest{ - Project: c.project, - Region: c.region, - ForwardingRuleResource: &computepb.ForwardingRule{ - Name: proto.String(c.forwardingRule), - IPProtocol: proto.String(computepb.ForwardingRule_IPProtocolEnum_name[int32(computepb.ForwardingRule_TCP)]), - LoadBalancingScheme: proto.String(computepb.ForwardingRule_LoadBalancingScheme_name[int32(computepb.ForwardingRule_EXTERNAL)]), - Ports: []string{"6443", "9000", strconv.Itoa(constants.VerifyServiceNodePortGRPC)}, - BackendService: proto.String("https://www.googleapis.com/compute/v1/projects/" + c.project + "/regions/" + c.region + "/backendServices/" + c.backendService), - }, - }) - if err != nil { - return err - } - if err := c.waitForOperations(ctx, []Operation{resp}); err != nil { - return err - } - - forwardingRule, err := c.forwardingRulesAPI.Get(ctx, &computepb.GetForwardingRuleRequest{ - Project: c.project, - Region: c.region, - ForwardingRule: c.forwardingRule, - }) - if err != nil { - return err - } - if forwardingRule.LabelFingerprint == nil { - return fmt.Errorf("forwarding rule %s has no label fingerprint", c.forwardingRule) - } - - resp, err = c.forwardingRulesAPI.SetLabels(ctx, &computepb.SetLabelsForwardingRuleRequest{ - Project: c.project, - Region: c.region, - Resource: c.forwardingRule, - RegionSetLabelsRequestResource: &computepb.RegionSetLabelsRequest{ - Labels: map[string]string{"constellation-uid": c.uid}, - LabelFingerprint: forwardingRule.LabelFingerprint, - }, - }) - if err != nil { - return err - } - - return c.waitForOperations(ctx, []Operation{resp}) -} - -// TerminateLoadBalancer removes the load balancer and its associated resources. 
-func (c *Client) TerminateLoadBalancer(ctx context.Context) error { - resp, err := c.forwardingRulesAPI.Delete(ctx, &computepb.DeleteForwardingRuleRequest{ - Project: c.project, - Region: c.region, - ForwardingRule: c.forwardingRule, - }) - if err != nil && !isNotFoundError(err) { - return err - } - if err == nil { - if err := c.waitForOperations(ctx, []Operation{resp}); err != nil { - return err - } - } - - resp, err = c.backendServicesAPI.Delete(ctx, &computepb.DeleteRegionBackendServiceRequest{ - Project: c.project, - Region: c.region, - BackendService: c.backendService, - }) - if err != nil && !isNotFoundError(err) { - return err - } - if err == nil { - if err := c.waitForOperations(ctx, []Operation{resp}); err != nil { - return err - } - } - - resp, err = c.healthChecksAPI.Delete(ctx, &computepb.DeleteRegionHealthCheckRequest{ - Project: c.project, - Region: c.region, - HealthCheck: c.healthCheck, - }) - if err != nil && !isNotFoundError(err) { - return err - } - - if err == nil { - if err := c.waitForOperations(ctx, []Operation{resp}); err != nil { - return err - } - } - - return nil -} diff --git a/cli/internal/gcp/client/network_test.go b/cli/internal/gcp/client/network_test.go index 105aa0bbb..004455567 100644 --- a/cli/internal/gcp/client/network_test.go +++ b/cli/internal/gcp/client/network_test.go @@ -9,8 +9,6 @@ import ( "github.com/edgelesssys/constellation/internal/cloud/cloudtypes" "github.com/stretchr/testify/assert" "google.golang.org/api/googleapi" - "google.golang.org/genproto/googleapis/cloud/compute/v1" - "google.golang.org/protobuf/proto" ) func TestCreateVPCs(t *testing.T) { @@ -351,183 +349,3 @@ func TestTerminateFirewall(t *testing.T) { }) } } - -func TestCreateLoadBalancer(t *testing.T) { - someErr := errors.New("failed") - testCases := map[string]struct { - operationRegionAPI operationRegionAPI - healthChecksAPI healthChecksAPI - backendServicesAPI backendServicesAPI - forwardingRulesAPI forwardingRulesAPI - wantErr bool - }{ - 
"successful create": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")}}, - operationRegionAPI: stubOperationRegionAPI{}, - }, - "CreateLoadBalancer fails when getting forwarding rule": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{getErr: someErr}, - operationRegionAPI: stubOperationRegionAPI{}, - wantErr: true, - }, - "CreateLoadBalancer fails when label fingerprint is missing": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{}}, - operationRegionAPI: stubOperationRegionAPI{}, - wantErr: true, - }, - "CreateLoadBalancer fails when creating health check": { - healthChecksAPI: stubHealthChecksAPI{insertErr: someErr}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")}}, - operationRegionAPI: stubOperationRegionAPI{}, - wantErr: true, - }, - "CreateLoadBalancer fails when creating backend service": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{insertErr: someErr}, - forwardingRulesAPI: stubForwardingRulesAPI{}, - operationRegionAPI: stubOperationRegionAPI{}, - wantErr: true, - }, - "CreateLoadBalancer fails when creating forwarding rule": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{insertErr: someErr}, - operationRegionAPI: stubOperationRegionAPI{}, - wantErr: true, - }, - "CreateLoadBalancer fails when waiting on operation": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - 
forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")}}, - operationRegionAPI: stubOperationRegionAPI{waitErr: someErr}, - wantErr: true, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - assert := assert.New(t) - - ctx := context.Background() - client := Client{ - project: "project", - zone: "zone", - name: "name", - uid: "uid", - backendServicesAPI: tc.backendServicesAPI, - forwardingRulesAPI: tc.forwardingRulesAPI, - healthChecksAPI: tc.healthChecksAPI, - operationRegionAPI: tc.operationRegionAPI, - } - - if tc.wantErr { - assert.Error(client.CreateLoadBalancer(ctx)) - } else { - assert.NoError(client.CreateLoadBalancer(ctx)) - assert.NotEmpty(client.healthCheck) - assert.NotEmpty(client.backendService) - assert.NotEmpty(client.forwardingRule) - } - }) - } -} - -func TestTerminateLoadBalancer(t *testing.T) { - someErr := errors.New("failed") - notFoundErr := &googleapi.Error{Code: http.StatusNotFound} - - testCases := map[string]struct { - operationRegionAPI operationRegionAPI - healthChecksAPI healthChecksAPI - backendServicesAPI backendServicesAPI - forwardingRulesAPI forwardingRulesAPI - wantErr bool - }{ - "successful terminate": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{}, - operationRegionAPI: stubOperationRegionAPI{}, - }, - "successful terminate when health check not found": { - healthChecksAPI: stubHealthChecksAPI{deleteErr: notFoundErr}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{}, - operationRegionAPI: stubOperationRegionAPI{}, - }, - "successful terminate when backend service not found": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{deleteErr: notFoundErr}, - forwardingRulesAPI: stubForwardingRulesAPI{}, - operationRegionAPI: stubOperationRegionAPI{}, - }, - 
"successful terminate when forwarding rule not found": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{deleteErr: notFoundErr}, - operationRegionAPI: stubOperationRegionAPI{}, - }, - "TerminateLoadBalancer fails when deleting health check": { - healthChecksAPI: stubHealthChecksAPI{deleteErr: someErr}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{}, - operationRegionAPI: stubOperationRegionAPI{}, - wantErr: true, - }, - "TerminateLoadBalancer fails when deleting backend service": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{deleteErr: someErr}, - forwardingRulesAPI: stubForwardingRulesAPI{}, - operationRegionAPI: stubOperationRegionAPI{}, - wantErr: true, - }, - "TerminateLoadBalancer fails when deleting forwarding rule": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{deleteErr: someErr}, - operationRegionAPI: stubOperationRegionAPI{}, - wantErr: true, - }, - "TerminateLoadBalancer fails when waiting on operation": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{}, - operationRegionAPI: stubOperationRegionAPI{waitErr: someErr}, - wantErr: true, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - assert := assert.New(t) - - ctx := context.Background() - client := Client{ - project: "project", - zone: "zone", - name: "name", - uid: "uid", - backendServicesAPI: tc.backendServicesAPI, - forwardingRulesAPI: tc.forwardingRulesAPI, - healthChecksAPI: tc.healthChecksAPI, - operationRegionAPI: tc.operationRegionAPI, - } - - if tc.wantErr { - assert.Error(client.TerminateLoadBalancer(ctx)) - } else { - assert.NoError(client.TerminateLoadBalancer(ctx)) - assert.Empty(client.healthCheck) - 
assert.Empty(client.backendService) - assert.Empty(client.forwardingRule) - } - }) - } -} diff --git a/cli/internal/gcp/client/operation.go b/cli/internal/gcp/client/operation.go index 7cb3b7210..14c707ce2 100644 --- a/cli/internal/gcp/client/operation.go +++ b/cli/internal/gcp/client/operation.go @@ -37,6 +37,9 @@ func (c *Client) waitForGlobalOperation(ctx context.Context, op Operation) error if err := ctx.Err(); err != nil { return err } + if op.Proto().Name == nil { + return errors.New("operation name is nil") + } waitReq := &computepb.WaitGlobalOperationRequest{ Operation: *op.Proto().Name, Project: c.project, @@ -59,6 +62,9 @@ func (c *Client) waitForZoneOperation(ctx context.Context, op Operation) error { if err := ctx.Err(); err != nil { return err } + if op.Proto().Name == nil { + return errors.New("operation name is nil") + } waitReq := &computepb.WaitZoneOperationRequest{ Operation: *op.Proto().Name, Project: c.project, @@ -82,6 +88,9 @@ func (c *Client) waitForRegionOperation(ctx context.Context, op Operation) error if err := ctx.Err(); err != nil { return err } + if op.Proto().Name == nil { + return errors.New("operation name is nil") + } waitReq := &computepb.WaitRegionOperationRequest{ Operation: *op.Proto().Name, Project: c.project, diff --git a/debugd/cdbg/cmd/deploy.go b/debugd/cdbg/cmd/deploy.go index 61d728299..20582347b 100644 --- a/debugd/cdbg/cmd/deploy.go +++ b/debugd/cdbg/cmd/deploy.go @@ -7,6 +7,7 @@ import ( "io/fs" "log" "net" + "strconv" "github.com/edgelesssys/constellation/debugd/bootstrapper" "github.com/edgelesssys/constellation/debugd/cdbg/config" @@ -94,7 +95,7 @@ func deploy(cmd *cobra.Command, fileHandler file.Handler, constellationConfig *c for _, ip := range ips { input := deployOnEndpointInput{ - debugdEndpoint: net.JoinHostPort(ip, debugd.DebugdPort), + debugdEndpoint: net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort)), bootstrapperPath: debugConfig.ConstellationDebugConfig.BootstrapperPath, reader: reader, 
authorizedKeys: debugConfig.ConstellationDebugConfig.AuthorizedKeys, @@ -194,13 +195,13 @@ func getIPsFromConfig(stat statec.ConstellationState, config configc.Config) ([] // add bootstrapper IP if it is not already in the list var foundBootstrapperIP bool for _, ip := range ips { - if ip == stat.BootstrapperHost { + if ip == stat.LoadBalancerIP { foundBootstrapperIP = true break } } - if !foundBootstrapperIP && stat.BootstrapperHost != "" { - ips = append(ips, stat.BootstrapperHost) + if !foundBootstrapperIP && stat.LoadBalancerIP != "" { + ips = append(ips, stat.LoadBalancerIP) } if len(ips) == 0 { return nil, fmt.Errorf("no public IPs found in statefile") diff --git a/debugd/debugd/cmd/debugd/debugd.go b/debugd/debugd/cmd/debugd/debugd.go index d445b4b8a..120f868f8 100644 --- a/debugd/debugd/cmd/debugd/debugd.go +++ b/debugd/debugd/cmd/debugd/debugd.go @@ -1,6 +1,7 @@ package main import ( + "context" "flag" "fmt" "net" @@ -16,9 +17,9 @@ import ( platform "github.com/edgelesssys/constellation/internal/cloud/cloudprovider" "github.com/edgelesssys/constellation/internal/deploy/ssh" "github.com/edgelesssys/constellation/internal/deploy/user" + "github.com/edgelesssys/constellation/internal/iproute" "github.com/edgelesssys/constellation/internal/logger" "github.com/spf13/afero" - "golang.org/x/net/context" ) const debugBanner = ` @@ -29,10 +30,9 @@ const debugBanner = ` ` func main() { - wg := &sync.WaitGroup{} verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription) - flag.Parse() + log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity)) fs := afero.NewOsFs() streamer := bootstrapper.NewFileStreamer(fs) @@ -49,15 +49,19 @@ func main() { case platform.Azure: azureFetcher, err := cloudprovider.NewAzure(ctx) if err != nil { - panic(err) + log.Fatalf("%s", err) } fetcher = azureFetcher case platform.GCP: gcpFetcher, err := cloudprovider.NewGCP(ctx) if err != nil { - panic(err) + log.Fatalf("%s", err) } fetcher = gcpFetcher + if err := 
setLoadbalancerRoute(ctx, fetcher); err != nil { + log.Errorf("adding load balancer IP to local routing table: %s", err) + } + log.Infof("Added load balancer IP to local routing table") case platform.QEMU: fetcher = cloudprovider.NewQEMU() default: @@ -67,11 +71,13 @@ func main() { sched := metadata.NewScheduler(log.Named("scheduler"), fetcher, ssh, download) serv := server.New(log.Named("server"), ssh, serviceManager, streamer) if err := deploy.DeployDefaultServiceUnit(ctx, serviceManager); err != nil { - panic(err) + log.Fatalf("%s", err) } writeDebugBanner(log) + wg := &sync.WaitGroup{} + wg.Add(1) go sched.Start(ctx, wg) wg.Add(1) @@ -91,3 +97,11 @@ func writeDebugBanner(log *logger.Logger) { log.Infof("Unable to print to /dev/ttyS0: %v", err) } } + +func setLoadbalancerRoute(ctx context.Context, fetcher metadata.Fetcher) error { + ip, err := fetcher.DiscoverLoadbalancerIP(ctx) + if err != nil { + return err + } + return iproute.AddToLocalRoutingTable(ctx, ip) +} diff --git a/debugd/debugd/constants.go b/debugd/debugd/constants.go index bd7197399..6a4f76fad 100644 --- a/debugd/debugd/constants.go +++ b/debugd/debugd/constants.go @@ -4,7 +4,6 @@ import "time" const ( DebugdMetadataFlag = "constellation-debugd" - DebugdPort = "4000" GRPCTimeout = 5 * time.Minute SSHCheckInterval = 30 * time.Second DiscoverDebugdInterval = 30 * time.Second diff --git a/debugd/debugd/deploy/download.go b/debugd/debugd/deploy/download.go index 3a4d3e35a..19f1d6cb7 100644 --- a/debugd/debugd/deploy/download.go +++ b/debugd/debugd/deploy/download.go @@ -4,11 +4,13 @@ import ( "context" "fmt" "net" + "strconv" "time" "github.com/edgelesssys/constellation/debugd/bootstrapper" "github.com/edgelesssys/constellation/debugd/debugd" pb "github.com/edgelesssys/constellation/debugd/service" + "github.com/edgelesssys/constellation/internal/constants" "github.com/edgelesssys/constellation/internal/logger" "go.uber.org/zap" "google.golang.org/grpc" @@ -38,7 +40,8 @@ func New(log *logger.Logger, 
dialer NetDialer, serviceManager serviceManager, wr // DownloadBootstrapper will open a new grpc connection to another instance, attempting to download a bootstrapper from that instance. func (d *Download) DownloadBootstrapper(ctx context.Context, ip string) error { log := d.log.With(zap.String("ip", ip)) - serverAddr := net.JoinHostPort(ip, debugd.DebugdPort) + serverAddr := net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort)) + // only retry download from same endpoint after backoff if lastAttempt, ok := d.attemptedDownloads[serverAddr]; ok && time.Since(lastAttempt) < debugd.BootstrapperDownloadRetryBackoff { return fmt.Errorf("download failed too recently: %v / %v", time.Since(lastAttempt), debugd.BootstrapperDownloadRetryBackoff) diff --git a/debugd/debugd/deploy/download_test.go b/debugd/debugd/deploy/download_test.go index 5f2237830..f61248c2a 100644 --- a/debugd/debugd/deploy/download_test.go +++ b/debugd/debugd/deploy/download_test.go @@ -6,12 +6,14 @@ import ( "fmt" "io" "net" + "strconv" "testing" "time" "github.com/edgelesssys/constellation/debugd/bootstrapper" "github.com/edgelesssys/constellation/debugd/debugd" pb "github.com/edgelesssys/constellation/debugd/service" + "github.com/edgelesssys/constellation/internal/constants" "github.com/edgelesssys/constellation/internal/grpc/testdialer" "github.com/edgelesssys/constellation/internal/logger" "github.com/stretchr/testify/assert" @@ -59,7 +61,7 @@ func TestDownloadBootstrapper(t *testing.T) { chunks: [][]byte{[]byte("test")}, }, attemptedDownloads: map[string]time.Time{ - "192.0.2.0:4000": time.Now(), + "192.0.2.0:" + strconv.Itoa(constants.DebugdPort): time.Now(), }, wantDownloadErr: true, }, @@ -98,7 +100,7 @@ func TestDownloadBootstrapper(t *testing.T) { grpcServ := grpc.NewServer() pb.RegisterDebugdServer(grpcServ, &tc.server) - lis := dialer.GetListener(net.JoinHostPort(ip, debugd.DebugdPort)) + lis := dialer.GetListener(net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort))) go 
grpcServ.Serve(lis) download := &Download{ diff --git a/debugd/debugd/metadata/cloudprovider/cloudprovider.go b/debugd/debugd/metadata/cloudprovider/cloudprovider.go index da2f594e1..64f470b9d 100644 --- a/debugd/debugd/metadata/cloudprovider/cloudprovider.go +++ b/debugd/debugd/metadata/cloudprovider/cloudprovider.go @@ -3,6 +3,7 @@ package cloudprovider import ( "context" "fmt" + "net" azurecloud "github.com/edgelesssys/constellation/bootstrapper/cloudprovider/azure" gcpcloud "github.com/edgelesssys/constellation/bootstrapper/cloudprovider/gcp" @@ -16,6 +17,8 @@ type providerMetadata interface { List(ctx context.Context) ([]metadata.InstanceMetadata, error) // Self retrieves the current instance. Self(ctx context.Context) (metadata.InstanceMetadata, error) + // GetLoadBalancerEndpoint returns the endpoint of the load balancer. + GetLoadBalancerEndpoint(ctx context.Context) (string, error) } // Fetcher checks the metadata service to search for instances that were set up for debugging and cloud provider specific SSH keys. @@ -80,6 +83,25 @@ func (f *Fetcher) DiscoverDebugdIPs(ctx context.Context) ([]string, error) { return ips, nil } +func (f *Fetcher) DiscoverLoadbalancerIP(ctx context.Context) (string, error) { + lbEndpoint, err := f.metaAPI.GetLoadBalancerEndpoint(ctx) + if err != nil { + return "", fmt.Errorf("retrieving load balancer endpoint: %w", err) + } + + // The port of the endpoint is not the port we need. We need to strip it off. + // + // TODO: Tag the specific load balancer we are looking for with a distinct tag. + // Change the GetLoadBalancerEndpoint method to return the endpoint of a load + // balancer with a given tag. + lbIP, _, err := net.SplitHostPort(lbEndpoint) + if err != nil { + return "", fmt.Errorf("parsing load balancer endpoint: %w", err) + } + + return lbIP, nil +} + // FetchSSHKeys will query the metadata of the current instance and deploys any SSH keys found. 
func (f *Fetcher) FetchSSHKeys(ctx context.Context) ([]ssh.UserKey, error) { self, err := f.metaAPI.Self(ctx) diff --git a/debugd/debugd/metadata/cloudprovider/cloudprovider_test.go b/debugd/debugd/metadata/cloudprovider/cloudprovider_test.go index ce5db566f..cfb448e8f 100644 --- a/debugd/debugd/metadata/cloudprovider/cloudprovider_test.go +++ b/debugd/debugd/metadata/cloudprovider/cloudprovider_test.go @@ -73,6 +73,50 @@ func TestDiscoverDebugIPs(t *testing.T) { } } +func TestDiscoverLoadbalancerIP(t *testing.T) { + ip := "192.0.2.1" + endpoint := ip + ":1234" + someErr := errors.New("failed") + + testCases := map[string]struct { + metaAPI providerMetadata + wantIP string + wantErr bool + }{ + "discovery works": { + metaAPI: &stubMetadata{getLBEndpointRes: endpoint}, + wantIP: ip, + }, + "get endpoint fails": { + metaAPI: &stubMetadata{getLBEndpointErr: someErr}, + wantErr: true, + }, + "invalid endpoint": { + metaAPI: &stubMetadata{getLBEndpointRes: "invalid"}, + wantErr: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + assert := assert.New(t) + + fetcher := &Fetcher{ + metaAPI: tc.metaAPI, + } + + ip, err := fetcher.DiscoverLoadbalancerIP(context.Background()) + + if tc.wantErr { + assert.Error(err) + } else { + assert.NoError(err) + assert.Equal(tc.wantIP, ip) + } + }) + } +} + func TestFetchSSHKeys(t *testing.T) { err := errors.New("some err") @@ -125,13 +169,15 @@ func TestFetchSSHKeys(t *testing.T) { } type stubMetadata struct { - listRes []metadata.InstanceMetadata - listErr error - selfRes metadata.InstanceMetadata - selfErr error - getInstanceRes metadata.InstanceMetadata - getInstanceErr error - supportedRes bool + listRes []metadata.InstanceMetadata + listErr error + selfRes metadata.InstanceMetadata + selfErr error + getInstanceRes metadata.InstanceMetadata + getInstanceErr error + getLBEndpointRes string + getLBEndpointErr error + supportedRes bool } func (m *stubMetadata) List(ctx context.Context) 
([]metadata.InstanceMetadata, error) { @@ -146,6 +192,10 @@ func (m *stubMetadata) GetInstance(ctx context.Context, providerID string) (meta return m.getInstanceRes, m.getInstanceErr } +func (m *stubMetadata) GetLoadBalancerEndpoint(ctx context.Context) (string, error) { + return m.getLBEndpointRes, m.getLBEndpointErr +} + func (m *stubMetadata) Supported() bool { return m.supportedRes } diff --git a/debugd/debugd/metadata/fallback/fallback.go b/debugd/debugd/metadata/fallback/fallback.go index 59d495ed9..7b879bd58 100644 --- a/debugd/debugd/metadata/fallback/fallback.go +++ b/debugd/debugd/metadata/fallback/fallback.go @@ -14,6 +14,11 @@ func (f Fetcher) DiscoverDebugdIPs(ctx context.Context) ([]string, error) { return nil, nil } +func (f Fetcher) DiscoverLoadbalancerIP(ctx context.Context) (string, error) { + // Fallback fetcher does not try to discover loadbalancer IP + return "", nil +} + func (f Fetcher) FetchSSHKeys(ctx context.Context) ([]ssh.UserKey, error) { // Fallback fetcher does not try to fetch ssh keys return nil, nil diff --git a/debugd/debugd/metadata/scheduler.go b/debugd/debugd/metadata/scheduler.go index 9bc922c86..f3749cddb 100644 --- a/debugd/debugd/metadata/scheduler.go +++ b/debugd/debugd/metadata/scheduler.go @@ -17,6 +17,7 @@ import ( type Fetcher interface { DiscoverDebugdIPs(ctx context.Context) ([]string, error) FetchSSHKeys(ctx context.Context) ([]ssh.UserKey, error) + DiscoverLoadbalancerIP(ctx context.Context) (string, error) } // Scheduler schedules fetching of metadata using timers. 
diff --git a/debugd/debugd/metadata/scheduler_test.go b/debugd/debugd/metadata/scheduler_test.go index 658bc2f4e..3de7a4556 100644 --- a/debugd/debugd/metadata/scheduler_test.go +++ b/debugd/debugd/metadata/scheduler_test.go @@ -117,6 +117,10 @@ func (s *stubFetcher) FetchSSHKeys(ctx context.Context) ([]ssh.UserKey, error) { return s.keys, s.fetchSSHKeysErr } +func (s *stubFetcher) DiscoverLoadbalancerIP(ctx context.Context) (string, error) { + return "", nil +} + type stubSSHDeployer struct { sshKeys []ssh.UserKey diff --git a/debugd/debugd/server/server.go b/debugd/debugd/server/server.go index 6f4cecf63..2e1630e1a 100644 --- a/debugd/debugd/server/server.go +++ b/debugd/debugd/server/server.go @@ -6,16 +6,20 @@ import ( "fmt" "io/fs" "net" + "strconv" "sync" + "time" "github.com/edgelesssys/constellation/debugd/bootstrapper" "github.com/edgelesssys/constellation/debugd/debugd" "github.com/edgelesssys/constellation/debugd/debugd/deploy" pb "github.com/edgelesssys/constellation/debugd/service" + "github.com/edgelesssys/constellation/internal/constants" "github.com/edgelesssys/constellation/internal/deploy/ssh" "github.com/edgelesssys/constellation/internal/logger" "go.uber.org/zap" "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" ) type debugdServer struct { @@ -113,9 +117,13 @@ func Start(log *logger.Logger, wg *sync.WaitGroup, serv pb.DebugdServer) { grpcLog := log.Named("gRPC") grpcLog.WithIncreasedLevel(zap.WarnLevel).ReplaceGRPCLogger() - grpcServer := grpc.NewServer(grpcLog.GetServerStreamInterceptor(), grpcLog.GetServerUnaryInterceptor()) + grpcServer := grpc.NewServer( + grpcLog.GetServerStreamInterceptor(), + grpcLog.GetServerUnaryInterceptor(), + grpc.KeepaliveParams(keepalive.ServerParameters{Time: 15 * time.Second}), + ) pb.RegisterDebugdServer(grpcServer, serv) - lis, err := net.Listen("tcp", net.JoinHostPort("0.0.0.0", debugd.DebugdPort)) + lis, err := net.Listen("tcp", net.JoinHostPort("0.0.0.0", strconv.Itoa(constants.DebugdPort))) if 
err != nil { log.With(zap.Error(err)).Fatalf("Listening failed") } diff --git a/debugd/debugd/server/server_test.go b/debugd/debugd/server/server_test.go index 60bc28c1f..ad78b4c8b 100644 --- a/debugd/debugd/server/server_test.go +++ b/debugd/debugd/server/server_test.go @@ -6,11 +6,13 @@ import ( "fmt" "io" "net" + "strconv" "testing" "github.com/edgelesssys/constellation/debugd/bootstrapper" "github.com/edgelesssys/constellation/debugd/debugd/deploy" pb "github.com/edgelesssys/constellation/debugd/service" + "github.com/edgelesssys/constellation/internal/constants" "github.com/edgelesssys/constellation/internal/deploy/ssh" "github.com/edgelesssys/constellation/internal/grpc/testdialer" "github.com/edgelesssys/constellation/internal/logger" @@ -26,7 +28,7 @@ func TestMain(m *testing.M) { } func TestUploadAuthorizedKeys(t *testing.T) { - endpoint := "192.0.2.1:4000" + endpoint := "192.0.2.1:" + strconv.Itoa(constants.DebugdPort) testCases := map[string]struct { ssh stubSSHDeployer @@ -105,7 +107,7 @@ func TestUploadAuthorizedKeys(t *testing.T) { } func TestUploadBootstrapper(t *testing.T) { - endpoint := "192.0.2.1:4000" + endpoint := "192.0.2.1:" + strconv.Itoa(constants.DebugdPort) testCases := map[string]struct { ssh stubSSHDeployer @@ -190,7 +192,8 @@ func TestUploadBootstrapper(t *testing.T) { } func TestDownloadBootstrapper(t *testing.T) { - endpoint := "192.0.2.1:4000" + endpoint := "192.0.2.1:" + strconv.Itoa(constants.DebugdPort) + testCases := map[string]struct { ssh stubSSHDeployer serviceManager stubServiceManager @@ -253,7 +256,8 @@ func TestDownloadBootstrapper(t *testing.T) { } func TestUploadSystemServiceUnits(t *testing.T) { - endpoint := "192.0.2.1:4000" + endpoint := "192.0.2.1:" + strconv.Itoa(constants.DebugdPort) + testCases := map[string]struct { ssh stubSSHDeployer serviceManager stubServiceManager diff --git a/go.mod b/go.mod index 894df693e..49c9f4396 100644 --- a/go.mod +++ b/go.mod @@ -87,7 +87,7 @@ require ( golang.org/x/net 
v0.0.0-20220617184016-355a448f1bc9 google.golang.org/api v0.85.0 google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad - google.golang.org/grpc v1.47.0 + google.golang.org/grpc v1.48.0 google.golang.org/protobuf v1.28.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.24.3 diff --git a/go.sum b/go.sum index 2478796ab..9326545f8 100644 --- a/go.sum +++ b/go.sum @@ -2335,8 +2335,9 @@ google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5 google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/internal/constants/constants.go b/internal/constants/constants.go index b2a6f93da..c151f6030 100644 --- a/internal/constants/constants.go +++ b/internal/constants/constants.go @@ -35,14 +35,16 @@ const ( // KMSPort is the port the KMS server listens on. 
KMSPort = 9000 BootstrapperPort = 9000 + KubernetesPort = 6443 + RecoveryPort = 9000 EnclaveSSHPort = 2222 SSHPort = 22 NVMEOverTCPPort = 8009 + DebugdPort = 4000 // Default NodePort Range // https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - NodePortFrom = 30000 - NodePortTo = 32767 - KubernetesPort = 6443 + NodePortFrom = 30000 + NodePortTo = 32767 // // Filenames. diff --git a/internal/iproute/route.go b/internal/iproute/route.go new file mode 100644 index 000000000..dacda95a9 --- /dev/null +++ b/internal/iproute/route.go @@ -0,0 +1,44 @@ +package iproute + +import ( + "context" + "errors" + "fmt" + "os/exec" + "strings" +) + +// AddToLocalRoutingTable adds the IP to the local routing table. +func AddToLocalRoutingTable(ctx context.Context, ip string) error { + return manipulateLocalRoutingTable(ctx, "add", ip) +} + +// RemoveFromLocalRoutingTable removes the IP from the local routing table. +func RemoveFromLocalRoutingTable(ctx context.Context, ip string) error { + return manipulateLocalRoutingTable(ctx, "del", ip) +} + +func manipulateLocalRoutingTable(ctx context.Context, action string, ip string) error { + // https://github.com/GoogleCloudPlatform/guest-agent/blob/792fce795218633bcbde505fb3457a0b24f26d37/google_guest_agent/addresses.go#L179 + if !strings.Contains(ip, "/") { + ip = ip + "/32" + } + + args := []string{"route", action, "to", "local", ip, "scope", "host", "dev", "ens3", "proto", "66"} + _, err := exec.CommandContext(ctx, "ip", args...).Output() + if err == nil { + return nil + } + var exitErr *exec.ExitError + if !errors.As(err, &exitErr) { + return fmt.Errorf("ip route %s: %w", action, err) + } + if exitErr.ExitCode() == 2 { + // "RTNETLINK answers: File exists" or "RTNETLINK answers: No such process" + // + // Ignore, expected in case of adding an existing route or deleting a route + // that does not exist. 
+ return nil + } + return fmt.Errorf("ip route %s (code %v) with: %s", action, exitErr.ExitCode(), exitErr.Stderr) +} diff --git a/internal/state/state.go b/internal/state/state.go index aa5a6499f..bfc483c46 100644 --- a/internal/state/state.go +++ b/internal/state/state.go @@ -6,10 +6,10 @@ import ( // ConstellationState is the state of a Constellation. type ConstellationState struct { - Name string `json:"name,omitempty"` - UID string `json:"uid,omitempty"` - CloudProvider string `json:"cloudprovider,omitempty"` - BootstrapperHost string `json:"bootstrapperhost,omitempty"` + Name string `json:"name,omitempty"` + UID string `json:"uid,omitempty"` + CloudProvider string `json:"cloudprovider,omitempty"` + LoadBalancerIP string `json:"bootstrapperhost,omitempty"` GCPWorkerInstances cloudtypes.Instances `json:"gcpworkers,omitempty"` GCPControlPlaneInstances cloudtypes.Instances `json:"gcpcontrolplanes,omitempty"` @@ -20,9 +20,8 @@ type ConstellationState struct { GCPNetwork string `json:"gcpnetwork,omitempty"` GCPSubnetwork string `json:"gcpsubnetwork,omitempty"` GCPFirewalls []string `json:"gcpfirewalls,omitempty"` - GCPBackendService string `json:"gcpbackendservice,omitempty"` - GCPHealthCheck string `json:"gcphealthcheck,omitempty"` - GCPForwardingRule string `json:"gcpforwardingrule,omitempty"` + GCPLoadbalancerIPname string `json:"gcploadbalanceripid,omitempty"` + GCPLoadbalancers []string `json:"gcploadbalancers,omitempty"` GCPProject string `json:"gcpproject,omitempty"` GCPZone string `json:"gcpzone,omitempty"` GCPRegion string `json:"gcpregion,omitempty"` diff --git a/mount/kms/constellation.go b/mount/kms/constellation.go index 1409670dd..7da9445fa 100644 --- a/mount/kms/constellation.go +++ b/mount/kms/constellation.go @@ -18,7 +18,7 @@ type ConstellationKMS struct { // NewConstellationKMS initializes a ConstellationKMS. 
func NewConstellationKMS(endpoint string) *ConstellationKMS { return &ConstellationKMS{ - endpoint: endpoint, // default: "kms.kube-system:9000" + endpoint: endpoint, // default: "kms.kube-system:port" kms: &constellationKMSClient{}, } } diff --git a/state/internal/setup/setup.go b/state/internal/setup/setup.go index be0fbb0e3..7ca56cab4 100644 --- a/state/internal/setup/setup.go +++ b/state/internal/setup/setup.go @@ -6,11 +6,13 @@ import ( "net" "os" "path/filepath" + "strconv" "syscall" "github.com/edgelesssys/constellation/bootstrapper/nodestate" "github.com/edgelesssys/constellation/internal/attestation" "github.com/edgelesssys/constellation/internal/attestation/vtpm" + "github.com/edgelesssys/constellation/internal/constants" "github.com/edgelesssys/constellation/internal/crypto" "github.com/edgelesssys/constellation/internal/file" "github.com/edgelesssys/constellation/internal/logger" @@ -20,7 +22,6 @@ import ( ) const ( - RecoveryPort = "9000" keyPath = "/run/cryptsetup-keys.d" keyFile = "state.key" stateDiskMappedName = "state" @@ -63,8 +64,9 @@ func (s *SetupManager) PrepareExistingDisk() error { s.log.Infof("Preparing existing state disk") uuid := s.mapper.DiskUUID() + endpoint := net.JoinHostPort("0.0.0.0", strconv.Itoa(constants.RecoveryPort)) getKey: - passphrase, measurementSecret, err := s.keyWaiter.WaitForDecryptionKey(uuid, net.JoinHostPort("0.0.0.0", RecoveryPort)) + passphrase, measurementSecret, err := s.keyWaiter.WaitForDecryptionKey(uuid, endpoint) if err != nil { return err } diff --git a/verify/server/server.go b/verify/server/server.go index a420a9647..fff247cb8 100644 --- a/verify/server/server.go +++ b/verify/server/server.go @@ -8,6 +8,7 @@ import ( "net" "net/http" "sync" + "time" "github.com/edgelesssys/constellation/internal/logger" "github.com/edgelesssys/constellation/verify/verifyproto" @@ -15,6 +16,7 @@ import ( "go.uber.org/zap/zapcore" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/keepalive" 
"google.golang.org/grpc/peer" "google.golang.org/grpc/status" ) @@ -48,7 +50,10 @@ func (s *Server) Run(httpListener, grpcListener net.Listener) error { var once sync.Once s.log.WithIncreasedLevel(zapcore.WarnLevel).Named("grpc").ReplaceGRPCLogger() - grpcServer := grpc.NewServer(s.log.Named("gRPC").GetServerUnaryInterceptor()) + grpcServer := grpc.NewServer( + s.log.Named("gRPC").GetServerUnaryInterceptor(), + grpc.KeepaliveParams(keepalive.ServerParameters{Time: 15 * time.Second}), + ) verifyproto.RegisterAPIServer(grpcServer, s) httpHandler := http.NewServeMux()