diff --git a/.github/workflows/build-bootstrapper.yml b/.github/workflows/build-bootstrapper.yml index 18f383bcf..fabaea097 100644 --- a/.github/workflows/build-bootstrapper.yml +++ b/.github/workflows/build-bootstrapper.yml @@ -1,4 +1,4 @@ -# We build the coordinator as part of each PR to see that the build still works. An image is only created once merged to main (see condition on call-coreos). +# We build the bootstrapper as part of each PR to see that the build still works. An image is only created once merged to main (see condition on call-coreos). name: Build and Upload the bootstrapper on: @@ -45,7 +45,7 @@ jobs: - name: Copy bootstrapper to S3 if not exists id: copy - # Only upload the Coordinator if this action is triggered from main branch + # Only upload the bootstrapper if this action is triggered from main branch if: ${{ github.ref == 'refs/heads/main' }} run: > aws s3api head-object --bucket ${{ secrets.PUBLIC_BUCKET_NAME }} --key bootstrapper/$(ls | grep "bootstrapper-") diff --git a/bootstrapper/README.md b/bootstrapper/README.md index 5203f7322..046e62f50 100644 --- a/bootstrapper/README.md +++ b/bootstrapper/README.md @@ -20,12 +20,11 @@ calling the InitCluster function of our Kubernetes library, which does a `kubead ## Join Flow The JoinClient is a gRPC client that is trying to connect to an JoinService, which might be running -in an already existing cluster as DaemonSet. If the JoinClient can connect to the JoinService, it tries -to issue a join ticket. The JoinService is validating the instance which wants to join the cluster using +in an already existing cluster as DaemonSet. The JoinService is validating the instance which wants to join the cluster using aTLS. For details on the used protocol and the verification of a joining instances measurements, see the [joinservice](./../joinservice) package. -If the JOinSerivce successfully verifies the instance, it issues a join ticket. 
The JoinClient then +If the JoinService successfully verifies the instance, it issues a join ticket. The JoinClient then joins the cluster by calling the `kubeadm join` command, using the token and other needed information from the join ticket. diff --git a/bootstrapper/cloudprovider/azure/ccm.go b/bootstrapper/cloudprovider/azure/ccm.go index 2353fcb10..abeb9d7fc 100644 --- a/bootstrapper/cloudprovider/azure/ccm.go +++ b/bootstrapper/cloudprovider/azure/ccm.go @@ -98,7 +98,7 @@ func (c *CloudControllerManager) Secrets(ctx context.Context, providerID string, SecurityGroupName: securityGroupName, LoadBalancerName: loadBalancerName, UseInstanceMetadata: true, - VmType: vmType, + VMType: vmType, Location: creds.Location, AADClientID: creds.ClientID, AADClientSecret: creds.ClientSecret, @@ -177,7 +177,7 @@ type cloudConfig struct { VNetResourceGroup string `json:"vnetResourceGroup,omitempty"` CloudProviderBackoff bool `json:"cloudProviderBackoff,omitempty"` UseInstanceMetadata bool `json:"useInstanceMetadata,omitempty"` - VmType string `json:"vmType,omitempty"` + VMType string `json:"vmType,omitempty"` AADClientID string `json:"aadClientId,omitempty"` AADClientSecret string `json:"aadClientSecret,omitempty"` } diff --git a/bootstrapper/cmd/bootstrapper/run.go b/bootstrapper/cmd/bootstrapper/run.go index 274d79f48..63ff05a69 100644 --- a/bootstrapper/cmd/bootstrapper/run.go +++ b/bootstrapper/cmd/bootstrapper/run.go @@ -3,6 +3,7 @@ package main import ( "net" + "github.com/edgelesssys/constellation/bootstrapper/internal/exit" "github.com/edgelesssys/constellation/bootstrapper/internal/initserver" "github.com/edgelesssys/constellation/bootstrapper/internal/joinclient" "github.com/edgelesssys/constellation/bootstrapper/internal/logging" @@ -39,19 +40,29 @@ func run(issuer quoteIssuer, tpm vtpm.TPMOpenFunc, fileHandler file.Handler, return } - nodeLock := nodelock.New() + nodeLock := nodelock.New(tpm) initServer := initserver.New(nodeLock, kube, issuer, fileHandler, 
logger) dialer := dialer.New(issuer, nil, &net.Dialer{}) joinClient := joinclient.New(nodeLock, dialer, kube, metadata, logger) - joinClient.Start() + cleaner := exit.New(). + With(initServer). + With(joinClient) + joinClient.Start(cleaner) - if err := initServer.Serve(bindIP, bindPort); err != nil { + if err := initServer.Serve(bindIP, bindPort, cleaner); err != nil { logger.Error("Failed to serve init server", zap.Error(err)) } - joinClient.Stop() + // wait for join client and server to exit cleanly + cleaner.Clean() + + // if node lock was never acquired, then we didn't bootstrap successfully. + if !nodeLock.Locked() { + cloudLogger.Disclose("bootstrapper failed") + logger.Fatal("bootstrapper failed") + } logger.Info("bootstrapper done") cloudLogger.Disclose("bootstrapper done") diff --git a/bootstrapper/internal/exit/exit.go b/bootstrapper/internal/exit/exit.go new file mode 100644 index 000000000..bdbe64f6e --- /dev/null +++ b/bootstrapper/internal/exit/exit.go @@ -0,0 +1,50 @@ +package exit + +import ( + "sync" +) + +type cleaner struct { + stoppers []stopper + + cleanupDone bool + wg sync.WaitGroup + mux sync.Mutex +} + +// New creates a new cleaner. +func New(stoppers ...stopper) *cleaner { + return &cleaner{ + stoppers: stoppers, + } +} + +// With adds a new stopper to the cleaner. +func (c *cleaner) With(stopper stopper) *cleaner { + c.stoppers = append(c.stoppers, stopper) + return c +} + +// Clean stops all services gracefully. 
+func (c *cleaner) Clean() { + // only cleanup once + c.mux.Lock() + defer c.mux.Unlock() + if c.cleanupDone { + return + } + + c.wg.Add(len(c.stoppers)) + for _, stopItem := range c.stoppers { + go func(stopItem stopper) { + stopItem.Stop() + c.wg.Done() + }(stopItem) + } + c.wg.Wait() + c.cleanupDone = true +} + +type stopper interface { + Stop() +} diff --git a/bootstrapper/internal/exit/exit_test.go b/bootstrapper/internal/exit/exit_test.go new file mode 100644 index 000000000..e36c31558 --- /dev/null +++ b/bootstrapper/internal/exit/exit_test.go @@ -0,0 +1,47 @@ +package exit + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} + +func TestNew(t *testing.T) { + assert := assert.New(t) + + cleaner := New(&spyStopper{}) + assert.NotNil(cleaner) + assert.NotEmpty(cleaner.stoppers) +} + +func TestWith(t *testing.T) { + assert := assert.New(t) + + cleaner := New().With(&spyStopper{}) + assert.NotEmpty(cleaner.stoppers) +} + +func TestClean(t *testing.T) { + assert := assert.New(t) + + stopper := &spyStopper{} + cleaner := New(stopper) + cleaner.Clean() + assert.True(stopper.stopped) + + // call again to make sure it doesn't panic or block + cleaner.Clean() +} + +type spyStopper struct { + stopped bool +} + +func (s *spyStopper) Stop() { + s.stopped = true +} diff --git a/bootstrapper/internal/initserver/initserver.go b/bootstrapper/internal/initserver/initserver.go index db7d26f1a..9b79611f9 100644 --- a/bootstrapper/internal/initserver/initserver.go +++ b/bootstrapper/internal/initserver/initserver.go @@ -9,7 +9,6 @@ import ( "github.com/edgelesssys/constellation/bootstrapper/initproto" "github.com/edgelesssys/constellation/bootstrapper/internal/diskencryption" "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes" - "github.com/edgelesssys/constellation/bootstrapper/internal/nodelock" "github.com/edgelesssys/constellation/bootstrapper/nodestate" 
"github.com/edgelesssys/constellation/bootstrapper/role" "github.com/edgelesssys/constellation/bootstrapper/util" @@ -31,7 +30,7 @@ import ( // The server handles initialization calls from the CLI and initializes the // Kubernetes cluster. type Server struct { - nodeLock *nodelock.Lock + nodeLock locker initializer ClusterInitializer disk encryptedDisk fileHandler file.Handler @@ -43,7 +42,7 @@ type Server struct { } // New creates a new initialization server. -func New(lock *nodelock.Lock, kube ClusterInitializer, issuer atls.Issuer, fh file.Handler, logger *zap.Logger) *Server { +func New(lock locker, kube ClusterInitializer, issuer atls.Issuer, fh file.Handler, logger *zap.Logger) *Server { logger = logger.Named("initServer") server := &Server{ nodeLock: lock, @@ -69,20 +68,32 @@ func New(lock *nodelock.Lock, kube ClusterInitializer, issuer atls.Issuer, fh fi return server } -func (s *Server) Serve(ip, port string) error { +// Serve starts the initialization server. +func (s *Server) Serve(ip, port string, cleaner cleaner) error { lis, err := net.Listen("tcp", net.JoinHostPort(ip, port)) if err != nil { return fmt.Errorf("failed to listen: %w", err) } - return s.grpcServer.Serve(lis) + err = s.grpcServer.Serve(lis) + cleaner.Clean() + return err } // Init initializes the cluster. func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initproto.InitResponse, error) { s.logger.Info("Init called") - if ok := s.nodeLock.TryLockOnce(); !ok { + id, err := s.deriveAttestationID(req.MasterSecret) + if err != nil { + return nil, status.Errorf(codes.Internal, "%s", err) + } + + nodeLockAcquired, err := s.nodeLock.TryLockOnce(id.Owner, id.Cluster) + if err != nil { + return nil, status.Errorf(codes.Internal, "locking node: %s", err) + } + if !nodeLockAcquired { // The join client seems to already have a connection to an // existing join service. At this point, any further call to // init does not make sense, so we just stop. 
@@ -93,11 +104,6 @@ func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initpro return nil, status.Error(codes.FailedPrecondition, "node is already being activated") } - id, err := s.deriveAttestationID(req.MasterSecret) - if err != nil { - return nil, status.Errorf(codes.Internal, "%s", err) - } - if err := s.setupDisk(req.MasterSecret); err != nil { return nil, status.Errorf(codes.Internal, "setting up disk: %s", err) } @@ -131,6 +137,7 @@ func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initpro } s.logger.Info("Init succeeded") + go s.grpcServer.GracefulStop() return &initproto.InitResponse{ Kubeconfig: kubeconfig, OwnerId: id.Owner, @@ -138,6 +145,11 @@ func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initpro }, nil } +// Stop stops the initialization server gracefully. +func (s *Server) Stop() { + s.grpcServer.GracefulStop() +} + func (s *Server) setupDisk(masterSecret []byte) error { if err := s.disk.Open(); err != nil { return fmt.Errorf("opening encrypted disk: %w", err) @@ -214,3 +226,13 @@ type serveStopper interface { // GracefulStop stops the server and blocks until all requests are done. GracefulStop() } + +type locker interface { + // TryLockOnce tries to lock the node. If the node is already locked, it + // returns false. If the node is unlocked, it locks it and returns true. 
+ TryLockOnce(ownerID, clusterID []byte) (bool, error) +} + +type cleaner interface { + Clean() +} diff --git a/bootstrapper/internal/initserver/initserver_test.go b/bootstrapper/internal/initserver/initserver_test.go index 4620deff2..52f3674dd 100644 --- a/bootstrapper/internal/initserver/initserver_test.go +++ b/bootstrapper/internal/initserver/initserver_test.go @@ -4,12 +4,12 @@ import ( "context" "errors" "net" + "sync" "testing" "time" "github.com/edgelesssys/constellation/bootstrapper/initproto" "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes" - "github.com/edgelesssys/constellation/bootstrapper/internal/nodelock" attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types" "github.com/edgelesssys/constellation/internal/file" "github.com/spf13/afero" @@ -28,7 +28,7 @@ func TestNew(t *testing.T) { assert := assert.New(t) fh := file.NewHandler(afero.NewMemMapFs()) - server := New(nodelock.New(), &stubClusterInitializer{}, nil, fh, zap.NewNop()) + server := New(newFakeLock(), &stubClusterInitializer{}, nil, fh, zap.NewNop()) assert.NotNil(server) assert.NotNil(server.logger) assert.NotNil(server.nodeLock) @@ -40,11 +40,13 @@ func TestNew(t *testing.T) { func TestInit(t *testing.T) { someErr := errors.New("failed") - lockedNodeLock := nodelock.New() - require.True(t, lockedNodeLock.TryLockOnce()) + lockedLock := newFakeLock() + aqcuiredLock, lockErr := lockedLock.TryLockOnce(nil, nil) + require.True(t, aqcuiredLock) + require.Nil(t, lockErr) testCases := map[string]struct { - nodeLock *nodelock.Lock + nodeLock *fakeLock initializer ClusterInitializer disk encryptedDisk fileHandler file.Handler @@ -53,14 +55,14 @@ func TestInit(t *testing.T) { wantShutdown bool }{ "successful init": { - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), initializer: &stubClusterInitializer{}, disk: &stubDisk{}, fileHandler: file.NewHandler(afero.NewMemMapFs()), req: &initproto.InitRequest{}, }, "node locked": { - nodeLock: 
lockedNodeLock, + nodeLock: lockedLock, initializer: &stubClusterInitializer{}, disk: &stubDisk{}, fileHandler: file.NewHandler(afero.NewMemMapFs()), @@ -69,7 +71,7 @@ func TestInit(t *testing.T) { wantShutdown: true, }, "disk open error": { - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), initializer: &stubClusterInitializer{}, disk: &stubDisk{openErr: someErr}, fileHandler: file.NewHandler(afero.NewMemMapFs()), @@ -77,7 +79,7 @@ func TestInit(t *testing.T) { wantErr: true, }, "disk uuid error": { - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), initializer: &stubClusterInitializer{}, disk: &stubDisk{uuidErr: someErr}, fileHandler: file.NewHandler(afero.NewMemMapFs()), @@ -85,7 +87,7 @@ func TestInit(t *testing.T) { wantErr: true, }, "disk update passphrase error": { - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), initializer: &stubClusterInitializer{}, disk: &stubDisk{updatePassphraseErr: someErr}, fileHandler: file.NewHandler(afero.NewMemMapFs()), @@ -93,7 +95,7 @@ func TestInit(t *testing.T) { wantErr: true, }, "write state file error": { - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), initializer: &stubClusterInitializer{}, disk: &stubDisk{}, fileHandler: file.NewHandler(afero.NewReadOnlyFs(afero.NewMemMapFs())), @@ -101,7 +103,7 @@ func TestInit(t *testing.T) { wantErr: true, }, "initialize cluster error": { - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), initializer: &stubClusterInitializer{initClusterErr: someErr}, disk: &stubDisk{}, fileHandler: file.NewHandler(afero.NewMemMapFs()), @@ -142,7 +144,7 @@ func TestInit(t *testing.T) { assert.NoError(err) assert.NotNil(kubeconfig) - assert.False(server.nodeLock.TryLockOnce()) // lock should be locked + assert.False(server.nodeLock.TryLockOnce(nil, nil)) // lock should be locked }) } } @@ -237,3 +239,17 @@ func (s *stubServeStopper) Serve(net.Listener) error { func (s *stubServeStopper) GracefulStop() { s.shutdownCalled <- struct{}{} } + +type fakeLock struct { + state 
*sync.Mutex +} + +func newFakeLock() *fakeLock { + return &fakeLock{ + state: &sync.Mutex{}, + } +} + +func (l *fakeLock) TryLockOnce(_, _ []byte) (bool, error) { + return l.state.TryLock(), nil +} diff --git a/bootstrapper/internal/joinclient/client.go b/bootstrapper/internal/joinclient/client.go index cb708d086..90ae04b77 100644 --- a/bootstrapper/internal/joinclient/client.go +++ b/bootstrapper/internal/joinclient/client.go @@ -10,7 +10,6 @@ import ( "time" "github.com/edgelesssys/constellation/bootstrapper/internal/diskencryption" - "github.com/edgelesssys/constellation/bootstrapper/internal/nodelock" "github.com/edgelesssys/constellation/bootstrapper/nodestate" "github.com/edgelesssys/constellation/bootstrapper/role" "github.com/edgelesssys/constellation/internal/cloud/metadata" @@ -33,7 +32,7 @@ const ( // JoinClient is a client for requesting the needed information and // joining an existing Kubernetes cluster. type JoinClient struct { - nodeLock *nodelock.Lock + nodeLock locker diskUUID string nodeName string role role.Role @@ -57,7 +56,7 @@ type JoinClient struct { } // New creates a new JoinClient. -func New(lock *nodelock.Lock, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, log *zap.Logger) *JoinClient { +func New(lock locker, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, log *zap.Logger) *JoinClient { return &JoinClient{ nodeLock: lock, disk: diskencryption.New(), @@ -78,7 +77,7 @@ func New(lock *nodelock.Lock, dial grpcDialer, joiner ClusterJoiner, meta Metada // After receiving the needed information, the node will join the cluster. // Multiple calls of start on the same client won't start a second routine if there is // already a routine running. -func (c *JoinClient) Start() { +func (c *JoinClient) Start(cleaner cleaner) { c.mux.Lock() defer c.mux.Unlock() @@ -123,6 +122,7 @@ func (c *JoinClient) Start() { err := c.tryJoinWithAvailableServices() if err == nil { c.log.Info("Joined successfully. 
Client is shut down.") + go cleaner.Clean() return } else if isUnrecoverable(err) { c.log.Error("Unrecoverable error occurred", zap.Error(err)) @@ -220,7 +220,12 @@ func (c *JoinClient) startNodeAndJoin(ticket *joinproto.IssueJoinTicketResponse) } }() - if ok := c.nodeLock.TryLockOnce(); !ok { + nodeLockAcquired, err := c.nodeLock.TryLockOnce(ticket.OwnerId, ticket.ClusterId) + if err != nil { + c.log.Info("Acquiring node lock failed", zap.Error(err)) + return fmt.Errorf("acquiring node lock: %w", err) + } + if !nodeLockAcquired { // There is already a cluster initialization in progress on // this node, so there is no need to also join the cluster, // as the initializing node is automatically part of the cluster. @@ -359,3 +364,13 @@ type encryptedDisk interface { // UpdatePassphrase switches the initial random passphrase of the encrypted disk to a permanent passphrase. UpdatePassphrase(passphrase string) error } + +type cleaner interface { + Clean() +} + +type locker interface { + // TryLockOnce tries to lock the node. If the node is already locked, it + // returns false. If the node is unlocked, it locks it and returns true. 
+ TryLockOnce(ownerID, clusterID []byte) (bool, error) +} diff --git a/bootstrapper/internal/joinclient/client_test.go b/bootstrapper/internal/joinclient/client_test.go index 5f1d0a90c..6f64f6461 100644 --- a/bootstrapper/internal/joinclient/client_test.go +++ b/bootstrapper/internal/joinclient/client_test.go @@ -9,7 +9,6 @@ import ( "testing" "time" - "github.com/edgelesssys/constellation/bootstrapper/internal/nodelock" "github.com/edgelesssys/constellation/bootstrapper/role" "github.com/edgelesssys/constellation/internal/cloud/metadata" "github.com/edgelesssys/constellation/internal/constants" @@ -35,8 +34,10 @@ func TestMain(m *testing.M) { func TestClient(t *testing.T) { someErr := errors.New("failed") - lockedLock := nodelock.New() - require.True(t, lockedLock.TryLockOnce()) + lockedLock := newFakeLock() + aqcuiredLock, lockErr := lockedLock.TryLockOnce(nil, nil) + require.True(t, aqcuiredLock) + require.Nil(t, lockErr) workerSelf := metadata.InstanceMetadata{Role: role.Worker, Name: "node-1"} controlSelf := metadata.InstanceMetadata{Role: role.ControlPlane, Name: "node-5"} peers := []metadata.InstanceMetadata{ @@ -49,7 +50,7 @@ func TestClient(t *testing.T) { role role.Role clusterJoiner *stubClusterJoiner disk encryptedDisk - nodeLock *nodelock.Lock + nodeLock *fakeLock apiAnswers []any wantLock bool wantJoin bool @@ -65,7 +66,7 @@ func TestClient(t *testing.T) { issueJoinTicketAnswer{}, }, clusterJoiner: &stubClusterJoiner{}, - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), disk: &stubDisk{}, wantJoin: true, wantLock: true, @@ -81,7 +82,7 @@ func TestClient(t *testing.T) { issueJoinTicketAnswer{}, }, clusterJoiner: &stubClusterJoiner{}, - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), disk: &stubDisk{}, wantJoin: true, wantLock: true, @@ -97,7 +98,7 @@ func TestClient(t *testing.T) { issueJoinTicketAnswer{}, }, clusterJoiner: &stubClusterJoiner{}, - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), disk: &stubDisk{}, wantJoin: true, wantLock: 
true, @@ -113,7 +114,7 @@ func TestClient(t *testing.T) { issueJoinTicketAnswer{}, }, clusterJoiner: &stubClusterJoiner{}, - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), disk: &stubDisk{}, wantJoin: true, wantLock: true, @@ -130,7 +131,7 @@ func TestClient(t *testing.T) { issueJoinTicketAnswer{}, }, clusterJoiner: &stubClusterJoiner{}, - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), disk: &stubDisk{}, wantJoin: true, wantLock: true, @@ -147,7 +148,7 @@ func TestClient(t *testing.T) { issueJoinTicketAnswer{}, }, clusterJoiner: &stubClusterJoiner{}, - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), disk: &stubDisk{}, wantJoin: true, wantLock: true, @@ -160,7 +161,7 @@ func TestClient(t *testing.T) { issueJoinTicketAnswer{}, }, clusterJoiner: &stubClusterJoiner{joinClusterErr: someErr}, - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), disk: &stubDisk{}, wantJoin: true, wantLock: true, @@ -180,13 +181,13 @@ func TestClient(t *testing.T) { "on control plane: disk open fails": { role: role.ControlPlane, clusterJoiner: &stubClusterJoiner{}, - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), disk: &stubDisk{openErr: someErr}, }, "on control plane: disk uuid fails": { role: role.ControlPlane, clusterJoiner: &stubClusterJoiner{}, - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), disk: &stubDisk{uuidErr: someErr}, }, } @@ -224,7 +225,7 @@ func TestClient(t *testing.T) { go joinServer.Serve(listener) defer joinServer.GracefulStop() - client.Start() + client.Start(stubCleaner{}) for _, a := range tc.apiAnswers { switch a := a.(type) { @@ -246,9 +247,9 @@ func TestClient(t *testing.T) { assert.False(tc.clusterJoiner.joinClusterCalled) } if tc.wantLock { - assert.False(client.nodeLock.TryLockOnce()) // lock should be locked + assert.False(client.nodeLock.TryLockOnce(nil, nil)) // lock should be locked } else { - assert.True(client.nodeLock.TryLockOnce()) + assert.True(client.nodeLock.TryLockOnce(nil, nil)) } }) } @@ -258,7 +259,7 @@ func 
TestClientConcurrentStartStop(t *testing.T) { netDialer := testdialer.NewBufconnDialer() dialer := dialer.New(nil, nil, netDialer) client := &JoinClient{ - nodeLock: nodelock.New(), + nodeLock: newFakeLock(), timeout: 30 * time.Second, interval: 30 * time.Second, dialer: dialer, @@ -274,7 +275,7 @@ func TestClientConcurrentStartStop(t *testing.T) { start := func() { defer wg.Done() - client.Start() + client.Start(stubCleaner{}) } stop := func() { @@ -415,3 +416,21 @@ func (d *stubDisk) UpdatePassphrase(string) error { d.updatePassphraseCalled = true return d.updatePassphraseErr } + +type stubCleaner struct{} + +func (c stubCleaner) Clean() {} + +type fakeLock struct { + state *sync.Mutex +} + +func newFakeLock() *fakeLock { + return &fakeLock{ + state: &sync.Mutex{}, + } +} + +func (l *fakeLock) TryLockOnce(_, _ []byte) (bool, error) { + return l.state.TryLock(), nil +} diff --git a/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go b/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go index 11bc626e8..ba506d1bc 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go +++ b/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go @@ -177,7 +177,7 @@ func (k *KubeadmJoinYAML) SetNodeName(nodeName string) { k.JoinConfiguration.NodeRegistration.Name = nodeName } -func (k *KubeadmJoinYAML) SetApiServerEndpoint(apiServerEndpoint string) { +func (k *KubeadmJoinYAML) SetAPIServerEndpoint(apiServerEndpoint string) { k.JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint = apiServerEndpoint } @@ -240,7 +240,7 @@ func (k *KubeadmInitYAML) SetCertSANs(certSANs []string) { } } -func (k *KubeadmInitYAML) SetApiServerAdvertiseAddress(apiServerAdvertiseAddress string) { +func (k *KubeadmInitYAML) SetAPIServerAdvertiseAddress(apiServerAdvertiseAddress string) { k.InitConfiguration.LocalAPIEndpoint.AdvertiseAddress = apiServerAdvertiseAddress } diff --git a/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config_test.go 
b/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config_test.go index 7646627b8..3cc941fa0 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config_test.go +++ b/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config_test.go @@ -24,7 +24,7 @@ func TestInitConfiguration(t *testing.T) { "CoreOS init config with all fields can be created": { config: func() KubeadmInitYAML { c := coreOSConfig.InitConfiguration(true) - c.SetApiServerAdvertiseAddress("192.0.2.0") + c.SetAPIServerAdvertiseAddress("192.0.2.0") c.SetNodeIP("192.0.2.0") c.SetNodeName("node") c.SetPodNetworkCIDR("10.244.0.0/16") @@ -62,7 +62,7 @@ func TestJoinConfiguration(t *testing.T) { "CoreOS join config with all fields can be created": { config: func() KubeadmJoinYAML { c := coreOSConfig.JoinConfiguration(true) - c.SetApiServerEndpoint("192.0.2.0:6443") + c.SetAPIServerEndpoint("192.0.2.0:6443") c.SetNodeIP("192.0.2.0") c.SetNodeName("node") c.SetToken("token") diff --git a/bootstrapper/internal/kubernetes/k8sapi/resources/image_pull_secret.go b/bootstrapper/internal/kubernetes/k8sapi/resources/image_pull_secret.go index 0704addf9..b68cf2e95 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/resources/image_pull_secret.go +++ b/bootstrapper/internal/kubernetes/k8sapi/resources/image_pull_secret.go @@ -15,7 +15,7 @@ func NewImagePullSecret() k8s.Secret { []byte(fmt.Sprintf("%s:%s", secrets.PullSecretUser, secrets.PullSecretToken)), ) - pullSecretDockerCfgJson := fmt.Sprintf(`{"auths":{"ghcr.io":{"auth":"%s"}}}`, base64EncodedSecret) + pullSecretDockerCfgJSON := fmt.Sprintf(`{"auths":{"ghcr.io":{"auth":"%s"}}}`, base64EncodedSecret) return k8s.Secret{ TypeMeta: meta.TypeMeta{ @@ -26,7 +26,7 @@ func NewImagePullSecret() k8s.Secret { Name: secrets.PullSecretName, Namespace: "kube-system", }, - StringData: map[string]string{".dockerconfigjson": pullSecretDockerCfgJson}, + StringData: map[string]string{".dockerconfigjson": pullSecretDockerCfgJSON}, Type: "kubernetes.io/dockerconfigjson", } } diff 
--git a/bootstrapper/internal/kubernetes/k8sapi/resources/secrets.go b/bootstrapper/internal/kubernetes/k8sapi/resources/secrets.go index cb686e011..e768375c5 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/resources/secrets.go +++ b/bootstrapper/internal/kubernetes/k8sapi/resources/secrets.go @@ -5,7 +5,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -// ConfigMaps represent a list of k8s Secret. +// Secrets represent a list of k8s Secret. type Secrets []*k8s.Secret // Marshal marshals secrets into multiple YAML documents. diff --git a/bootstrapper/internal/kubernetes/k8sapi/util.go b/bootstrapper/internal/kubernetes/k8sapi/util.go index 070fc9b12..5a6ce9125 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/util.go +++ b/bootstrapper/internal/kubernetes/k8sapi/util.go @@ -13,7 +13,6 @@ import ( "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources" "go.uber.org/zap" - kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" ) const ( @@ -23,10 +22,7 @@ const ( kubeletStartTimeout = 10 * time.Minute ) -var ( - kubernetesKeyRegexp = regexp.MustCompile("[a-f0-9]{64}") - providerIDRegex = regexp.MustCompile(`^azure:///subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/Microsoft.Compute/virtualMachineScaleSets/([^/]+)/virtualMachines/([^/]+)$`) -) +var providerIDRegex = regexp.MustCompile(`^azure:///subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/Microsoft.Compute/virtualMachineScaleSets/([^/]+)/virtualMachines/([^/]+)$`) // Client provides the functionality of `kubectl apply`. 
type Client interface { @@ -35,28 +31,12 @@ type Client interface { // TODO: add tolerations } -type ClusterUtil interface { - InstallComponents(ctx context.Context, version string) error - InitCluster(initConfig []byte) error - JoinCluster(joinConfig []byte) error - SetupPodNetwork(kubectl Client, podNetworkConfiguration resources.Marshaler) error - SetupAccessManager(kubectl Client, accessManagerConfiguration resources.Marshaler) error - SetupAutoscaling(kubectl Client, clusterAutoscalerConfiguration resources.Marshaler, secrets resources.Marshaler) error - SetupCloudControllerManager(kubectl Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error - SetupCloudNodeManager(kubectl Client, cloudNodeManagerConfiguration resources.Marshaler) error - SetupKMS(kubectl Client, kmsConfiguration resources.Marshaler) error - StartKubelet() error - RestartKubelet() error - GetControlPlaneJoinCertificateKey() (string, error) - CreateJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) -} - // KubernetesUtil provides low level management of the kubernetes cluster. type KubernetesUtil struct { inst installer } -// NewKubernetesUtils creates a new KubernetesUtil. +// NewKubernetesUtil creates a new KubernetesUtil. 
func NewKubernetesUtil() *KubernetesUtil { return &KubernetesUtil{ inst: newOSInstaller(), @@ -91,7 +71,6 @@ func (k *KubernetesUtil) InitCluster(ctx context.Context, initConfig []byte, log if err != nil { return fmt.Errorf("creating init config file %v: %w", initConfigFile.Name(), err) } - // defer os.Remove(initConfigFile.Name()) if _, err := initConfigFile.Write(initConfig); err != nil { return fmt.Errorf("writing kubeadm init yaml config %v: %w", initConfigFile.Name(), err) @@ -296,7 +275,6 @@ func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log if err != nil { return fmt.Errorf("creating join config file %v: %w", joinConfigFile.Name(), err) } - // defer os.Remove(joinConfigFile.Name()) if _, err := joinConfigFile.Write(joinConfig); err != nil { return fmt.Errorf("writing kubeadm init yaml config %v: %w", joinConfigFile.Name(), err) @@ -333,40 +311,3 @@ func (k *KubernetesUtil) RestartKubelet() error { defer cancel() return restartSystemdUnit(ctx, "kubelet.service") } - -// GetControlPlaneJoinCertificateKey return the key which can be used in combination with the joinArgs -// to join the Cluster as control-plane. -func (k *KubernetesUtil) GetControlPlaneJoinCertificateKey(ctx context.Context) (string, error) { - // Key will be valid for 1h (no option to reduce the duration). 
- // https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-upload-certs - output, err := exec.CommandContext(ctx, kubeadmPath, "init", "phase", "upload-certs", "--upload-certs").Output() - if err != nil { - var exitErr *exec.ExitError - if errors.As(err, &exitErr) { - return "", fmt.Errorf("kubeadm upload-certs failed (code %v) with: %s (full err: %s)", exitErr.ExitCode(), exitErr.Stderr, err) - } - return "", fmt.Errorf("kubeadm upload-certs: %w", err) - } - // Example output: - /* - [upload-certs] Storing the certificates in ConfigMap "kubeadm-certs" in the "kube-system" Namespace - [upload-certs] Using certificate key: - 9555b74008f24687eb964bd90a164ecb5760a89481d9c55a77c129b7db438168 - */ - key := kubernetesKeyRegexp.FindString(string(output)) - if key == "" { - return "", fmt.Errorf("failed to parse kubeadm output: %s", string(output)) - } - return key, nil -} - -// CreateJoinToken creates a new bootstrap (join) token. -func (k *KubernetesUtil) CreateJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) { - output, err := exec.CommandContext(ctx, kubeadmPath, "token", "create", "--ttl", ttl.String(), "--print-join-command").Output() - if err != nil { - return nil, fmt.Errorf("kubeadm token create: %w", err) - } - // `kubeadm token create [...] 
--print-join-command` outputs the following format: - // kubeadm join [API_SERVER_ENDPOINT] --token [TOKEN] --discovery-token-ca-cert-hash [DISCOVERY_TOKEN_CA_CERT_HASH] - return ParseJoinCommand(string(output)) -} diff --git a/bootstrapper/internal/kubernetes/k8sutil.go b/bootstrapper/internal/kubernetes/k8sutil.go index 159451229..ca381c5a5 100644 --- a/bootstrapper/internal/kubernetes/k8sutil.go +++ b/bootstrapper/internal/kubernetes/k8sutil.go @@ -2,12 +2,10 @@ package kubernetes import ( "context" - "time" "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi" "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources" "go.uber.org/zap" - kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" ) type clusterUtil interface { @@ -25,7 +23,5 @@ type clusterUtil interface { SetupGCPGuestAgent(kubectl k8sapi.Client, gcpGuestAgentConfiguration resources.Marshaler) error StartKubelet() error RestartKubelet() error - GetControlPlaneJoinCertificateKey(ctx context.Context) (string, error) - CreateJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) FixCilium(nodeNameK8s string) } diff --git a/bootstrapper/internal/kubernetes/kubernetes.go b/bootstrapper/internal/kubernetes/kubernetes.go index 6508daa90..cb43cb907 100644 --- a/bootstrapper/internal/kubernetes/kubernetes.go +++ b/bootstrapper/internal/kubernetes/kubernetes.go @@ -7,7 +7,6 @@ import ( "fmt" "os/exec" "strings" - "time" "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi" "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources" @@ -201,7 +200,7 @@ func (k *KubeWrapper) InitCluster( } } - go k.clusterUtil.FixCilium(nodeName) + k.clusterUtil.FixCilium(nodeName) return k.GetKubeconfig() } @@ -236,7 +235,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo // Step 2: configure kubeadm join config joinConfig := 
k.configProvider.JoinConfiguration(k.cloudControllerManager.Supported()) - joinConfig.SetApiServerEndpoint(args.APIServerEndpoint) + joinConfig.SetAPIServerEndpoint(args.APIServerEndpoint) joinConfig.SetToken(args.Token) joinConfig.AppendDiscoveryTokenCaCertHash(args.CACertHashes[0]) joinConfig.SetNodeIP(nodeInternalIP) @@ -253,7 +252,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo return fmt.Errorf("joining cluster: %v; %w ", string(joinConfigYAML), err) } - go k.clusterUtil.FixCilium(nodeName) + k.clusterUtil.FixCilium(nodeName) return nil } @@ -263,16 +262,6 @@ func (k *KubeWrapper) GetKubeconfig() ([]byte, error) { return k.kubeconfigReader.ReadKubeconfig() } -// GetKubeadmCertificateKey return the key needed to join the Cluster as Control-Plane (has to be executed on a control-plane; errors otherwise). -func (k *KubeWrapper) GetKubeadmCertificateKey(ctx context.Context) (string, error) { - return k.clusterUtil.GetControlPlaneJoinCertificateKey(ctx) -} - -// GetJoinToken returns a bootstrap (join) token. 
-func (k *KubeWrapper) GetJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) { - return k.clusterUtil.CreateJoinToken(ctx, ttl) -} - func (k *KubeWrapper) setupJoinService(csp string, measurementsJSON []byte, id attestationtypes.ID) error { idJSON, err := json.Marshal(id) if err != nil { diff --git a/bootstrapper/internal/kubernetes/kubernetes_test.go b/bootstrapper/internal/kubernetes/kubernetes_test.go index e78f6fdba..8af243e6c 100644 --- a/bootstrapper/internal/kubernetes/kubernetes_test.go +++ b/bootstrapper/internal/kubernetes/kubernetes_test.go @@ -5,7 +5,6 @@ import ( "errors" "regexp" "testing" - "time" "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi" "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources" @@ -26,7 +25,7 @@ func TestMain(m *testing.M) { func TestInitCluster(t *testing.T) { someErr := errors.New("failed") - serviceAccountUri := "some-service-account-uri" + serviceAccountURI := "some-service-account-uri" masterSecret := []byte("some-master-secret") autoscalingNodeGroups := []string{"0,10,autoscaling_group_0"} @@ -270,7 +269,7 @@ func TestInitCluster(t *testing.T) { kubeconfigReader: tc.kubeconfigReader, getIPAddr: func() (string, error) { return privateIP, nil }, } - _, err := kube.InitCluster(context.Background(), autoscalingNodeGroups, serviceAccountUri, k8sVersion, attestationtypes.ID{}, KMSConfig{MasterSecret: masterSecret}, nil, zaptest.NewLogger(t)) + _, err := kube.InitCluster(context.Background(), autoscalingNodeGroups, serviceAccountURI, k8sVersion, attestationtypes.ID{}, KMSConfig{MasterSecret: masterSecret}, nil, zaptest.NewLogger(t)) if tc.wantErr { assert.Error(err) @@ -490,8 +489,6 @@ type stubClusterUtil struct { joinClusterErr error startKubeletErr error restartKubeletErr error - createJoinTokenResponse *kubeadm.BootstrapTokenDiscovery - createJoinTokenErr error initConfigs [][]byte joinConfigs [][]byte @@ -555,14 +552,6 @@ 
func (s *stubClusterUtil) RestartKubelet() error { return s.restartKubeletErr } -func (s *stubClusterUtil) GetControlPlaneJoinCertificateKey(context.Context) (string, error) { - return "", nil -} - -func (s *stubClusterUtil) CreateJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) { - return s.createJoinTokenResponse, s.createJoinTokenErr -} - func (s *stubClusterUtil) FixCilium(nodeName string) { } diff --git a/bootstrapper/internal/nodelock/nodelock.go b/bootstrapper/internal/nodelock/nodelock.go index 2324d912b..56923a13c 100644 --- a/bootstrapper/internal/nodelock/nodelock.go +++ b/bootstrapper/internal/nodelock/nodelock.go @@ -1,6 +1,10 @@ package nodelock -import "sync" +import ( + "sync" + + "github.com/edgelesssys/constellation/internal/attestation/vtpm" +) // Lock locks the node once there the join or the init is at a point // where there is no turning back and the other operation does not need @@ -10,16 +14,39 @@ import "sync" // There is no way to unlock, so the state changes only once from unlock to // locked. type Lock struct { - mux *sync.Mutex + tpm vtpm.TPMOpenFunc + locked bool + state *sync.Mutex + mux *sync.RWMutex } // New creates a new NodeLock, which is unlocked. -func New() *Lock { - return &Lock{mux: &sync.Mutex{}} +func New(tpm vtpm.TPMOpenFunc) *Lock { + return &Lock{ + tpm: tpm, + state: &sync.Mutex{}, + mux: &sync.RWMutex{}, + } } // TryLockOnce tries to lock the node. If the node is already locked, it // returns false. If the node is unlocked, it locks it and returns true. -func (n *Lock) TryLockOnce() bool { - return n.mux.TryLock() +func (l *Lock) TryLockOnce(ownerID, clusterID []byte) (bool, error) { + success := l.state.TryLock() + if success { + l.mux.Lock() + defer l.mux.Unlock() + l.locked = true + if err := vtpm.MarkNodeAsBootstrapped(l.tpm, ownerID, clusterID); err != nil { + return success, err + } + } + return success, nil +} + +// Locked returns true if the node is locked. 
+func (l *Lock) Locked() bool { + l.mux.RLock() + defer l.mux.RUnlock() + return l.locked } diff --git a/bootstrapper/role/role_test.go b/bootstrapper/role/role_test.go index 4ab99b056..895deb0ad 100644 --- a/bootstrapper/role/role_test.go +++ b/bootstrapper/role/role_test.go @@ -15,24 +15,24 @@ func TestMain(m *testing.M) { func TestMarshal(t *testing.T) { testCases := map[string]struct { role Role - wantJson string + wantJSON string wantErr bool }{ "controlePlane role": { role: ControlPlane, - wantJson: `"ControlPlane"`, + wantJSON: `"ControlPlane"`, }, "node role": { role: Worker, - wantJson: `"Worker"`, + wantJSON: `"Worker"`, }, "admin role": { role: Admin, - wantJson: `"Admin"`, + wantJSON: `"Admin"`, }, "unknown role": { role: Unknown, - wantJson: `"Unknown"`, + wantJSON: `"Unknown"`, }, } @@ -48,7 +48,7 @@ func TestMarshal(t *testing.T) { } require.NoError(err) - assert.Equal(tc.wantJson, string(jsonRole)) + assert.Equal(tc.wantJSON, string(jsonRole)) }) } } diff --git a/cli/internal/azure/client/client.go b/cli/internal/azure/client/client.go index 6f2436165..3ce9ec191 100644 --- a/cli/internal/azure/client/client.go +++ b/cli/internal/azure/client/client.go @@ -154,6 +154,10 @@ func (c *Client) GetState() (state.ConstellationState, error) { return state.ConstellationState{}, errors.New("client has no uid") } stat.UID = c.uid + if len(c.loadBalancerPubIP) == 0 { + return state.ConstellationState{}, errors.New("client has no load balancer public IP") + } + stat.BootstrapperHost = c.loadBalancerPubIP if len(c.location) == 0 { return state.ConstellationState{}, errors.New("client has no location") } @@ -213,6 +217,10 @@ func (c *Client) SetState(stat state.ConstellationState) error { return errors.New("state has no uuid") } c.uid = stat.UID + if len(stat.BootstrapperHost) == 0 { + return errors.New("state has no bootstrapper host") + } + c.loadBalancerPubIP = stat.BootstrapperHost if len(stat.AzureLocation) == 0 { return errors.New("state has no location") } 
diff --git a/cli/internal/azure/client/client_test.go b/cli/internal/azure/client/client_test.go index c24b4db18..06bb015d1 100644 --- a/cli/internal/azure/client/client_test.go +++ b/cli/internal/azure/client/client_test.go @@ -37,6 +37,7 @@ func TestSetGetState(t *testing.T) { }, Name: "name", UID: "uid", + BootstrapperHost: "bootstrapper-host", AzureResourceGroup: "resource-group", AzureLocation: "location", AzureSubscription: "subscription", @@ -58,6 +59,7 @@ func TestSetGetState(t *testing.T) { }, Name: "name", UID: "uid", + BootstrapperHost: "bootstrapper-host", AzureResourceGroup: "resource-group", AzureLocation: "location", AzureSubscription: "subscription", @@ -80,6 +82,7 @@ func TestSetGetState(t *testing.T) { }, Name: "name", UID: "uid", + BootstrapperHost: "bootstrapper-host", AzureResourceGroup: "resource-group", AzureLocation: "location", AzureSubscription: "subscription", @@ -107,6 +110,7 @@ func TestSetGetState(t *testing.T) { }, }, UID: "uid", + BootstrapperHost: "bootstrapper-host", AzureResourceGroup: "resource-group", AzureLocation: "location", AzureSubscription: "subscription", @@ -134,6 +138,35 @@ func TestSetGetState(t *testing.T) { }, }, Name: "name", + BootstrapperHost: "bootstrapper-host", + AzureResourceGroup: "resource-group", + AzureLocation: "location", + AzureSubscription: "subscription", + AzureTenant: "tenant", + AzureSubnet: "azure-subnet", + AzureNetworkSecurityGroup: "network-security-group", + AzureWorkersScaleSet: "worker-scale-set", + AzureControlPlanesScaleSet: "controlplane-scale-set", + }, + wantErr: true, + }, + "missing bootstrapper host": { + state: state.ConstellationState{ + CloudProvider: cloudprovider.Azure.String(), + AzureWorkers: cloudtypes.Instances{ + "0": { + PublicIP: "ip1", + PrivateIP: "ip2", + }, + }, + AzureControlPlane: cloudtypes.Instances{ + "0": { + PublicIP: "ip3", + PrivateIP: "ip4", + }, + }, + Name: "name", + UID: "uid", AzureResourceGroup: "resource-group", AzureLocation: "location", 
AzureSubscription: "subscription", @@ -162,6 +195,7 @@ func TestSetGetState(t *testing.T) { }, Name: "name", UID: "uid", + BootstrapperHost: "bootstrapper-host", AzureLocation: "location", AzureSubscription: "subscription", AzureTenant: "tenant", @@ -189,6 +223,7 @@ func TestSetGetState(t *testing.T) { }, Name: "name", UID: "uid", + BootstrapperHost: "bootstrapper-host", AzureResourceGroup: "resource-group", AzureSubscription: "subscription", AzureTenant: "tenant", @@ -216,6 +251,7 @@ func TestSetGetState(t *testing.T) { }, Name: "name", UID: "uid", + BootstrapperHost: "bootstrapper-host", AzureResourceGroup: "resource-group", AzureTenant: "tenant", AzureLocation: "location", @@ -243,6 +279,7 @@ func TestSetGetState(t *testing.T) { }, Name: "name", UID: "uid", + BootstrapperHost: "bootstrapper-host", AzureResourceGroup: "resource-group", AzureSubscription: "subscription", AzureLocation: "location", @@ -270,6 +307,7 @@ func TestSetGetState(t *testing.T) { }, Name: "name", UID: "uid", + BootstrapperHost: "bootstrapper-host", AzureResourceGroup: "resource-group", AzureLocation: "location", AzureSubscription: "subscription", @@ -297,6 +335,7 @@ func TestSetGetState(t *testing.T) { }, Name: "name", UID: "uid", + BootstrapperHost: "bootstrapper-host", AzureResourceGroup: "resource-group", AzureLocation: "location", AzureSubscription: "subscription", @@ -324,6 +363,7 @@ func TestSetGetState(t *testing.T) { }, Name: "name", UID: "uid", + BootstrapperHost: "bootstrapper-host", AzureResourceGroup: "resource-group", AzureLocation: "location", AzureSubscription: "subscription", @@ -351,6 +391,7 @@ func TestSetGetState(t *testing.T) { }, Name: "name", UID: "uid", + BootstrapperHost: "bootstrapper-host", AzureResourceGroup: "resource-group", AzureLocation: "location", AzureSubscription: "subscription", @@ -400,6 +441,7 @@ func TestSetGetState(t *testing.T) { controlPlanes: tc.state.AzureControlPlane, name: tc.state.Name, uid: tc.state.UID, + loadBalancerPubIP: 
tc.state.BootstrapperHost, resourceGroup: tc.state.AzureResourceGroup, location: tc.state.AzureLocation, subscriptionID: tc.state.AzureSubscription, diff --git a/cli/internal/azure/client/compute.go b/cli/internal/azure/client/compute.go index 1bffdbc29..949aba3c1 100644 --- a/cli/internal/azure/client/compute.go +++ b/cli/internal/azure/client/compute.go @@ -61,14 +61,6 @@ func (c *Client) CreateInstances(ctx context.Context, input CreateInstancesInput } c.controlPlanes = instances - // Set the load balancer public IP in the first control plane - coord, ok := c.controlPlanes["0"] - if !ok { - return errors.New("control plane 0 not found") - } - coord.PublicIP = c.loadBalancerPubIP - c.controlPlanes["0"] = coord - return nil } diff --git a/cli/internal/azure/client/compute_test.go b/cli/internal/azure/client/compute_test.go index 1cbea2085..d3d5e6ce2 100644 --- a/cli/internal/azure/client/compute_test.go +++ b/cli/internal/azure/client/compute_test.go @@ -228,7 +228,7 @@ func TestCreateInstances(t *testing.T) { assert.NotEmpty(client.workers["0"].PrivateIP) assert.NotEmpty(client.workers["0"].PublicIP) assert.NotEmpty(client.controlPlanes["0"].PrivateIP) - assert.Equal("lbip", client.controlPlanes["0"].PublicIP) + assert.NotEmpty(client.controlPlanes["0"].PublicIP) } }) } diff --git a/cli/internal/azure/instances.go b/cli/internal/azure/instances.go index db999ac01..d15a866c3 100644 --- a/cli/internal/azure/instances.go +++ b/cli/internal/azure/instances.go @@ -9,6 +9,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute" ) +// VMInstance describes a single instance. // TODO: deprecate as soon as scale sets are available. type VMInstance struct { Name string @@ -20,6 +21,7 @@ type VMInstance struct { Image string } +// Azure makes a new virtual machine template with default values. // TODO: deprecate as soon as scale sets are available. 
func (i VMInstance) Azure() armcompute.VirtualMachine { return armcompute.VirtualMachine{ diff --git a/cli/internal/cloudcmd/create.go b/cli/internal/cloudcmd/create.go index 2c679c913..04636a759 100644 --- a/cli/internal/cloudcmd/create.go +++ b/cli/internal/cloudcmd/create.go @@ -123,7 +123,7 @@ func (c *Creator) createGCP(ctx context.Context, cl gcpclient, config *config.Co createInput := gcpcl.CreateInstancesInput{ CountControlPlanes: controlPlaneCount, CountWorkers: workerCount, - ImageId: config.Provider.GCP.Image, + ImageID: config.Provider.GCP.Image, InstanceType: insType, StateDiskSizeGB: config.StateDiskSizeGB, KubeEnv: gcp.KubeEnv, diff --git a/cli/internal/cloudcmd/serviceaccount.go b/cli/internal/cloudcmd/serviceaccount.go index cc1d21a78..11d576269 100644 --- a/cli/internal/cloudcmd/serviceaccount.go +++ b/cli/internal/cloudcmd/serviceaccount.go @@ -11,7 +11,7 @@ import ( "github.com/edgelesssys/constellation/internal/state" ) -// ServieAccountCreator creates service accounts. +// ServiceAccountCreator creates service accounts. 
type ServiceAccountCreator struct { newGCPClient func(ctx context.Context) (gcpclient, error) newAzureClient func(subscriptionID, tenantID string) (azureclient, error) diff --git a/cli/internal/cmd/init.go b/cli/internal/cmd/init.go index 14d19bc4f..53925e0cd 100644 --- a/cli/internal/cmd/init.go +++ b/cli/internal/cmd/init.go @@ -102,7 +102,7 @@ func initialize(cmd *cobra.Command, dialer grpcDialer, serviceAccCreator service return err } - controlPlanes, workers, err := getScalingGroupsFromConfig(stat, config) + controlPlanes, workers, err := getScalingGroupsFromState(stat, config) if err != nil { return err } @@ -123,7 +123,7 @@ func initialize(cmd *cobra.Command, dialer grpcDialer, serviceAccCreator service KubernetesVersion: "1.23.6", SshUserKeys: ssh.ToProtoSlice(sshUsers), } - resp, err := initCall(cmd.Context(), dialer, controlPlanes.PublicIPs()[0], req) + resp, err := initCall(cmd.Context(), dialer, stat.BootstrapperHost, req) if err != nil { return err } @@ -269,7 +269,7 @@ func readOrGenerateMasterSecret(writer io.Writer, fileHandler file.Handler, file return masterSecret, nil } -func getScalingGroupsFromConfig(stat state.ConstellationState, config *config.Config) (controlPlanes, workers cloudtypes.ScalingGroup, err error) { +func getScalingGroupsFromState(stat state.ConstellationState, config *config.Config) (controlPlanes, workers cloudtypes.ScalingGroup, err error) { switch { case len(stat.GCPControlPlanes) != 0: return getGCPInstances(stat, config) @@ -329,7 +329,7 @@ func getAzureInstances(stat state.ConstellationState, config *config.Config) (co return } -func getQEMUInstances(stat state.ConstellationState, config *config.Config) (controlPlanes, workers cloudtypes.ScalingGroup, err error) { +func getQEMUInstances(stat state.ConstellationState, _ *config.Config) (controlPlanes, workers cloudtypes.ScalingGroup, err error) { controlPlanesMap := stat.QEMUControlPlane if len(controlPlanesMap) == 0 { return cloudtypes.ScalingGroup{}, 
cloudtypes.ScalingGroup{}, errors.New("no controlPlanes available, can't create Constellation without any instance") diff --git a/cli/internal/cmd/init_test.go b/cli/internal/cmd/init_test.go index 65f0ea99d..6863fd557 100644 --- a/cli/internal/cmd/init_test.go +++ b/cli/internal/cmd/init_test.go @@ -38,7 +38,8 @@ func TestInitArgumentValidation(t *testing.T) { func TestInitialize(t *testing.T) { testGcpState := state.ConstellationState{ - CloudProvider: "GCP", + CloudProvider: "GCP", + BootstrapperHost: "192.0.2.1", GCPWorkers: cloudtypes.Instances{ "id-0": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"}, "id-1": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"}, @@ -48,7 +49,8 @@ func TestInitialize(t *testing.T) { }, } testAzureState := state.ConstellationState{ - CloudProvider: "Azure", + CloudProvider: "Azure", + BootstrapperHost: "192.0.2.1", AzureWorkers: cloudtypes.Instances{ "id-0": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"}, "id-1": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"}, @@ -59,7 +61,8 @@ func TestInitialize(t *testing.T) { AzureResourceGroup: "test", } testQemuState := state.ConstellationState{ - CloudProvider: "QEMU", + CloudProvider: "QEMU", + BootstrapperHost: "192.0.2.1", QEMUWorkers: cloudtypes.Instances{ "id-0": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"}, "id-1": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"}, @@ -183,7 +186,7 @@ func TestWriteOutput(t *testing.T) { ownerID := base64.StdEncoding.EncodeToString(resp.OwnerId) clusterID := base64.StdEncoding.EncodeToString(resp.ClusterId) - expectedIdFile := clusterIDsFile{ + expectedIDFile := clusterIDsFile{ ClusterID: clusterID, OwnerID: ownerID, Endpoint: net.JoinHostPort("ip", strconv.Itoa(constants.VerifyServiceNodePortGRPC)), @@ -206,10 +209,10 @@ func TestWriteOutput(t *testing.T) { idsFile, err := afs.ReadFile(constants.ClusterIDsFileName) assert.NoError(err) - var testIdFile clusterIDsFile - err = json.Unmarshal(idsFile, &testIdFile) + var testIDFile clusterIDsFile + err = 
json.Unmarshal(idsFile, &testIDFile) assert.NoError(err) - assert.Equal(expectedIdFile, testIdFile) + assert.Equal(expectedIDFile, testIDFile) } func TestInitCompletion(t *testing.T) { diff --git a/cli/internal/cmd/version.go b/cli/internal/cmd/version.go index 3cf32b73b..6802b6fd2 100644 --- a/cli/internal/cmd/version.go +++ b/cli/internal/cmd/version.go @@ -7,7 +7,7 @@ import ( "github.com/spf13/cobra" ) -// NewVerifyCmd returns a new cobra.Command for the verify command. +// NewVersionCmd returns a new cobra.Command for the version command. func NewVersionCmd() *cobra.Command { cmd := &cobra.Command{ Use: "version", diff --git a/cli/internal/gcp/client/client.go b/cli/internal/gcp/client/client.go index db53f6ab1..f304069e0 100644 --- a/cli/internal/gcp/client/client.go +++ b/cli/internal/gcp/client/client.go @@ -227,6 +227,11 @@ func (c *Client) GetState() (state.ConstellationState, error) { return state.ConstellationState{}, errors.New("client has no controlPlanes") } stat.GCPControlPlanes = c.controlPlanes + publicIPs := c.controlPlanes.PublicIPs() + if len(publicIPs) == 0 { + return state.ConstellationState{}, errors.New("client has no bootstrapper endpoint") + } + stat.BootstrapperHost = publicIPs[0] if c.workerInstanceGroup == "" { return state.ConstellationState{}, errors.New("client has no workerInstanceGroup") diff --git a/cli/internal/gcp/client/client_test.go b/cli/internal/gcp/client/client_test.go index cd3f1b923..315588a24 100644 --- a/cli/internal/gcp/client/client_test.go +++ b/cli/internal/gcp/client/client_test.go @@ -46,6 +46,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", "fw-2"}, @@ -73,6 +74,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", 
"fw-2"}, @@ -100,6 +102,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", "fw-2"}, @@ -132,6 +135,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", "fw-2"}, @@ -164,6 +168,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", "fw-2"}, @@ -196,6 +201,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", "fw-2"}, @@ -228,6 +234,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", "fw-2"}, @@ -260,6 +267,7 @@ func TestSetGetState(t *testing.T) { GCPZone: "zone-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", "fw-2"}, @@ -291,6 +299,7 @@ func TestSetGetState(t *testing.T) { GCPProject: "proj-id", GCPZone: "zone-id", UID: "uid", + BootstrapperHost: "ip3", GCPRegion: "region-id", GCPNetwork: "net-id", GCPFirewalls: []string{"fw-1", "fw-2"}, @@ -322,6 +331,7 @@ func TestSetGetState(t *testing.T) { GCPProject: "proj-id", GCPZone: "zone-id", Name: "name", + BootstrapperHost: "ip3", GCPRegion: "region-id", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", @@ -356,6 +366,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPWorkerInstanceTemplate: 
"temp-id", @@ -388,6 +399,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPFirewalls: []string{"fw-1", "fw-2"}, GCPWorkerInstanceTemplate: "temp-id", GCPControlPlaneInstanceTemplate: "temp-id", @@ -419,6 +431,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPFirewalls: []string{"fw-1", "fw-2"}, GCPWorkerInstanceTemplate: "temp-id", @@ -451,6 +464,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPFirewalls: []string{"fw-1", "fw-2"}, GCPWorkerInstanceTemplate: "temp-id", @@ -483,6 +497,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPFirewalls: []string{"fw-1", "fw-2"}, GCPWorkerInstanceTemplate: "temp-id", @@ -515,6 +530,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", "fw-2"}, @@ -547,6 +563,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", "fw-2"}, @@ -579,6 +596,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", "fw-2"}, @@ -611,6 +629,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", "fw-2"}, @@ -643,6 +662,7 @@ func TestSetGetState(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", 
GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", "fw-2"}, @@ -745,6 +765,7 @@ func TestSetStateCloudProvider(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", "fw-2"}, @@ -776,6 +797,7 @@ func TestSetStateCloudProvider(t *testing.T) { GCPRegion: "region-id", Name: "name", UID: "uid", + BootstrapperHost: "ip3", GCPNetwork: "net-id", GCPSubnetwork: "subnet-id", GCPFirewalls: []string{"fw-1", "fw-2"}, diff --git a/cli/internal/gcp/client/instances.go b/cli/internal/gcp/client/instances.go index 4d515c3cb..006a597d3 100644 --- a/cli/internal/gcp/client/instances.go +++ b/cli/internal/gcp/client/instances.go @@ -30,7 +30,7 @@ func (c *Client) CreateInstances(ctx context.Context, input CreateInstancesInput Network: c.network, SecondarySubnetworkRangeName: c.secondarySubnetworkRange, Subnetwork: c.subnetwork, - ImageId: input.ImageId, + ImageID: input.ImageID, InstanceType: input.InstanceType, StateDiskSizeGB: int64(input.StateDiskSizeGB), Role: role.Worker.String(), @@ -52,7 +52,7 @@ func (c *Client) CreateInstances(ctx context.Context, input CreateInstancesInput Network: c.network, Subnetwork: c.subnetwork, SecondarySubnetworkRangeName: c.secondarySubnetworkRange, - ImageId: input.ImageId, + ImageID: input.ImageID, InstanceType: input.InstanceType, StateDiskSizeGB: int64(input.StateDiskSizeGB), Role: role.ControlPlane.String(), @@ -197,13 +197,13 @@ func (c *Client) deleteInstanceGroupManager(ctx context.Context, instanceGroupMa return c.instanceGroupManagersAPI.Delete(ctx, req) } -func (c *Client) waitForInstanceGroupScaling(ctx context.Context, groupId string) error { +func (c *Client) waitForInstanceGroupScaling(ctx context.Context, groupID string) error { for { if err := ctx.Err(); err != nil { return err } listReq := &computepb.ListManagedInstancesInstanceGroupManagersRequest{ - InstanceGroupManager: groupId, + 
InstanceGroupManager: groupID, Project: c.project, Zone: c.zone, } @@ -228,9 +228,9 @@ func (c *Client) waitForInstanceGroupScaling(ctx context.Context, groupId string } // getInstanceIPs requests the IPs of the client's instances. -func (c *Client) getInstanceIPs(ctx context.Context, groupId string, list cloudtypes.Instances) error { +func (c *Client) getInstanceIPs(ctx context.Context, groupID string, list cloudtypes.Instances) error { req := &computepb.ListInstancesRequest{ - Filter: proto.String("name=" + groupId + "*"), + Filter: proto.String("name=" + groupID + "*"), Project: c.project, Zone: c.zone, } @@ -292,7 +292,7 @@ func (i *instanceGroupManagerInput) InsertInstanceGroupManagerRequest() computep type CreateInstancesInput struct { CountWorkers int CountControlPlanes int - ImageId string + ImageID string InstanceType string StateDiskSizeGB int KubeEnv string @@ -303,7 +303,7 @@ type insertInstanceTemplateInput struct { Network string Subnetwork string SecondarySubnetworkRangeName string - ImageId string + ImageID string InstanceType string StateDiskSizeGB int64 Role string @@ -328,7 +328,7 @@ func (i insertInstanceTemplateInput) insertInstanceTemplateRequest() *computepb. 
{ InitializeParams: &computepb.AttachedDiskInitializeParams{ DiskSizeGb: proto.Int64(10), - SourceImage: proto.String(i.ImageId), + SourceImage: proto.String(i.ImageID), }, AutoDelete: proto.Bool(true), Boot: proto.Bool(true), diff --git a/cli/internal/gcp/client/instances_test.go b/cli/internal/gcp/client/instances_test.go index da7879b28..19893e06b 100644 --- a/cli/internal/gcp/client/instances_test.go +++ b/cli/internal/gcp/client/instances_test.go @@ -43,7 +43,7 @@ func TestCreateInstances(t *testing.T) { testInput := CreateInstancesInput{ CountControlPlanes: 3, CountWorkers: 4, - ImageId: "img", + ImageID: "img", InstanceType: "n2d-standard-2", KubeEnv: "kube-env", } diff --git a/cli/internal/gcp/client/network.go b/cli/internal/gcp/client/network.go index 5e9675d2f..717d352d1 100644 --- a/cli/internal/gcp/client/network.go +++ b/cli/internal/gcp/client/network.go @@ -6,7 +6,6 @@ import ( "fmt" "github.com/edgelesssys/constellation/internal/cloud/cloudtypes" - "google.golang.org/genproto/googleapis/cloud/compute/v1" computepb "google.golang.org/genproto/googleapis/cloud/compute/v1" "google.golang.org/protobuf/proto" ) @@ -208,11 +207,13 @@ func (c *Client) CreateLoadBalancer(ctx context.Context) error { Region: c.region, HealthCheckResource: &computepb.HealthCheck{ Name: proto.String(c.healthCheck), - Type: proto.String(compute.HealthCheck_Type_name[int32(compute.HealthCheck_TCP)]), + Type: proto.String(computepb.HealthCheck_Type_name[int32(computepb.HealthCheck_HTTPS)]), CheckIntervalSec: proto.Int32(1), TimeoutSec: proto.Int32(1), - TcpHealthCheck: &computepb.TCPHealthCheck{ - Port: proto.Int32(6443), + HttpsHealthCheck: &computepb.HTTPSHealthCheck{ + Host: proto.String(""), + Port: proto.Int32(6443), + RequestPath: proto.String("/readyz"), }, }, }) @@ -229,13 +230,13 @@ func (c *Client) CreateLoadBalancer(ctx context.Context) error { Region: c.region, BackendServiceResource: &computepb.BackendService{ Name: proto.String(c.backendService), - Protocol: 
proto.String(compute.BackendService_Protocol_name[int32(compute.BackendService_TCP)]), - LoadBalancingScheme: proto.String(computepb.BackendService_LoadBalancingScheme_name[int32(compute.BackendService_EXTERNAL)]), + Protocol: proto.String(computepb.BackendService_Protocol_name[int32(computepb.BackendService_TCP)]), + LoadBalancingScheme: proto.String(computepb.BackendService_LoadBalancingScheme_name[int32(computepb.BackendService_EXTERNAL)]), TimeoutSec: proto.Int32(10), HealthChecks: []string{"https://www.googleapis.com/compute/v1/projects/" + c.project + "/regions/" + c.region + "/healthChecks/" + c.healthCheck}, Backends: []*computepb.Backend{ { - BalancingMode: proto.String(computepb.Backend_BalancingMode_name[int32(compute.Backend_CONNECTION)]), + BalancingMode: proto.String(computepb.Backend_BalancingMode_name[int32(computepb.Backend_CONNECTION)]), Group: proto.String("https://www.googleapis.com/compute/v1/projects/" + c.project + "/zones/" + c.zone + "/instanceGroups/" + c.controlPlaneInstanceGroup), }, }, @@ -254,8 +255,8 @@ func (c *Client) CreateLoadBalancer(ctx context.Context) error { Region: c.region, ForwardingRuleResource: &computepb.ForwardingRule{ Name: proto.String(c.forwardingRule), - IPProtocol: proto.String(compute.ForwardingRule_IPProtocolEnum_name[int32(compute.ForwardingRule_TCP)]), - LoadBalancingScheme: proto.String(compute.ForwardingRule_LoadBalancingScheme_name[int32(compute.ForwardingRule_EXTERNAL)]), + IPProtocol: proto.String(computepb.ForwardingRule_IPProtocolEnum_name[int32(computepb.ForwardingRule_TCP)]), + LoadBalancingScheme: proto.String(computepb.ForwardingRule_LoadBalancingScheme_name[int32(computepb.ForwardingRule_EXTERNAL)]), Ports: []string{"6443", "9000"}, BackendService: proto.String("https://www.googleapis.com/compute/v1/projects/" + c.project + "/regions/" + c.region + "/backendServices/" + c.backendService), }, @@ -295,7 +296,7 @@ func (c *Client) CreateLoadBalancer(ctx context.Context) error { return 
c.waitForOperations(ctx, []Operation{resp}) } -// TerminteLoadBalancer removes the load balancer and its associated resources. +// TerminateLoadBalancer removes the load balancer and its associated resources. func (c *Client) TerminateLoadBalancer(ctx context.Context) error { resp, err := c.forwardingRulesAPI.Delete(ctx, &computepb.DeleteForwardingRuleRequest{ Project: c.project, diff --git a/debugd/cdbg/cmd/deploy.go b/debugd/cdbg/cmd/deploy.go index 304327fde..d6881f41b 100644 --- a/debugd/cdbg/cmd/deploy.go +++ b/debugd/cdbg/cmd/deploy.go @@ -187,6 +187,17 @@ func getIPsFromConfig(stat statec.ConstellationState, config configc.Config) ([] ips = append(ips, ip) } } + // add bootstrapper IP if it is not already in the list + var foundBootstrapperIP bool + for _, ip := range ips { + if ip == stat.BootstrapperHost { + foundBootstrapperIP = true + break + } + } + if !foundBootstrapperIP && stat.BootstrapperHost != "" { + ips = append(ips, stat.BootstrapperHost) + } if len(ips) == 0 { return nil, fmt.Errorf("no public IPs found in statefile") } diff --git a/debugd/debugd/constants.go b/debugd/debugd/constants.go index ac58a3b15..bd7197399 100644 --- a/debugd/debugd/constants.go +++ b/debugd/debugd/constants.go @@ -18,6 +18,8 @@ Wants=network-online.target After=network-online.target [Service] Type=simple +RemainAfterExit=yes +Restart=on-failure EnvironmentFile=/etc/constellation.env ExecStartPre=-setenforce Permissive ExecStartPre=/usr/bin/mkdir -p /opt/cni/bin/ diff --git a/debugd/debugd/deploy/service_test.go b/debugd/debugd/deploy/service_test.go index c8dc0604e..2e29d4cca 100644 --- a/debugd/debugd/deploy/service_test.go +++ b/debugd/debugd/deploy/service_test.go @@ -216,27 +216,27 @@ type fakeDbusConn struct { actionErr error } -func (f *fakeDbusConn) StartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - f.inputs = append(f.inputs, dbusConnActionInput{name: name, mode: mode}) - ch <- f.result +func (c *fakeDbusConn) 
StartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + c.inputs = append(c.inputs, dbusConnActionInput{name: name, mode: mode}) + ch <- c.result - return f.jobID, f.actionErr + return c.jobID, c.actionErr } -func (f *fakeDbusConn) StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - f.inputs = append(f.inputs, dbusConnActionInput{name: name, mode: mode}) - ch <- f.result +func (c *fakeDbusConn) StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + c.inputs = append(c.inputs, dbusConnActionInput{name: name, mode: mode}) + ch <- c.result - return f.jobID, f.actionErr + return c.jobID, c.actionErr } -func (f *fakeDbusConn) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { - f.inputs = append(f.inputs, dbusConnActionInput{name: name, mode: mode}) - ch <- f.result +func (c *fakeDbusConn) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + c.inputs = append(c.inputs, dbusConnActionInput{name: name, mode: mode}) + ch <- c.result - return f.jobID, f.actionErr + return c.jobID, c.actionErr } -func (s *fakeDbusConn) ReloadContext(ctx context.Context) error { - return s.actionErr +func (c *fakeDbusConn) ReloadContext(ctx context.Context) error { + return c.actionErr } diff --git a/hack/terraform-to-state/create-state.go b/hack/terraform-to-state/create-state.go index edfb12201..9a87c90ff 100644 --- a/hack/terraform-to-state/create-state.go +++ b/hack/terraform-to-state/create-state.go @@ -41,6 +41,7 @@ func transformState(tfOut terraformOutput) state.ConstellationState { Name: "qemu", UID: "debug", CloudProvider: "qemu", + BootstrapperHost: tfOut.ControlPlaneIPs.Value[0], QEMUWorkers: cloudtypes.Instances{}, QEMUControlPlane: cloudtypes.Instances{}, } diff --git a/internal/attestation/gcp/issuer_test.go b/internal/attestation/gcp/issuer_test.go 
index 42a2e51b8..e1fc47ac5 100644 --- a/internal/attestation/gcp/issuer_test.go +++ b/internal/attestation/gcp/issuer_test.go @@ -29,7 +29,7 @@ func TestGetGCEInstanceInfo(t *testing.T) { projectIDString: "projectID", instanceNameString: "instanceName", zoneString: "zone", - projecIdErr: errors.New("error"), + projecIDErr: errors.New("error"), }, wantErr: true, }, @@ -78,13 +78,13 @@ type fakeMetadataClient struct { projectIDString string instanceNameString string zoneString string - projecIdErr error + projecIDErr error instanceNameErr error zoneErr error } func (c fakeMetadataClient) projectID() (string, error) { - return c.projectIDString, c.projecIdErr + return c.projectIDString, c.projecIDErr } func (c fakeMetadataClient) instanceName() (string, error) { diff --git a/internal/cloud/metadata/metadata.go b/internal/cloud/metadata/metadata.go index 0340c6600..a0dc1ec57 100644 --- a/internal/cloud/metadata/metadata.go +++ b/internal/cloud/metadata/metadata.go @@ -10,7 +10,7 @@ import ( "github.com/edgelesssys/constellation/internal/constants" ) -// Instance describes metadata of a peer. +// InstanceMetadata describes metadata of a peer. type InstanceMetadata struct { Name string ProviderID string @@ -32,6 +32,7 @@ type InstanceLister interface { List(ctx context.Context) ([]InstanceMetadata, error) } +// InitServerEndpoints returns the list of endpoints for the init server, which are running on the control plane nodes. func InitServerEndpoints(ctx context.Context, lister InstanceLister) ([]string, error) { instances, err := lister.List(ctx) if err != nil { diff --git a/internal/logger/log.go b/internal/logger/log.go index a8a9847d2..ecb52dafc 100644 --- a/internal/logger/log.go +++ b/internal/logger/log.go @@ -88,7 +88,7 @@ func New(logType LogType, logLevel zapcore.Level) *Logger { return &Logger{logger: logger.Sugar()} } -// NewTestLogger creates a logger for unit / integration tests. +// NewTest creates a logger for unit / integration tests. 
func NewTest(t *testing.T) *Logger { return &Logger{ logger: zaptest.NewLogger(t).Sugar().Named(fmt.Sprintf("%q", t.Name())), diff --git a/internal/state/state.go b/internal/state/state.go index ff8af0da2..d9c406b57 100644 --- a/internal/state/state.go +++ b/internal/state/state.go @@ -6,9 +6,10 @@ import ( // ConstellationState is the state of a Constellation. type ConstellationState struct { - Name string `json:"name,omitempty"` - UID string `json:"uid,omitempty"` - CloudProvider string `json:"cloudprovider,omitempty"` + Name string `json:"name,omitempty"` + UID string `json:"uid,omitempty"` + CloudProvider string `json:"cloudprovider,omitempty"` + BootstrapperHost string `json:"bootstrapperhost,omitempty"` GCPWorkers cloudtypes.Instances `json:"gcpworkers,omitempty"` GCPControlPlanes cloudtypes.Instances `json:"gcpcontrolplanes,omitempty"` diff --git a/joinservice/cmd/main.go b/joinservice/cmd/main.go index 2e2ffb96a..8c976f79d 100644 --- a/joinservice/cmd/main.go +++ b/joinservice/cmd/main.go @@ -50,7 +50,7 @@ func main() { creds := atlscredentials.New(nil, []atls.Validator{validator}) - vpcIP, err := getIPinVPC(ctx, *provider) + vpcIP, err := getVPCIP(ctx, *provider) if err != nil { log.With(zap.Error(err)).Fatalf("Failed to get IP in VPC") } @@ -87,7 +87,7 @@ func main() { } } -func getIPinVPC(ctx context.Context, provider string) (string, error) { +func getVPCIP(ctx context.Context, provider string) (string, error) { switch cloudprovider.FromString(provider) { case cloudprovider.Azure: metadata, err := azurecloud.NewMetadata(ctx) diff --git a/joinservice/internal/kms/kms.go b/joinservice/internal/kms/kms.go index 62a28a2d7..501d67f9f 100644 --- a/joinservice/internal/kms/kms.go +++ b/joinservice/internal/kms/kms.go @@ -27,7 +27,7 @@ func New(log *logger.Logger, endpoint string) Client { } } -// GetDEK returns a data encryption key for the given UUID. +// GetDataKey returns a data encryption key for the given UUID. 
func (c Client) GetDataKey(ctx context.Context, uuid string, length int) ([]byte, error) { log := c.log.With(zap.String("diskUUID", uuid), zap.String("endpoint", c.endpoint)) // TODO: update credentials if we enable aTLS on the KMS diff --git a/joinservice/internal/kubeadm/keymanager.go b/joinservice/internal/kubeadm/keymanager.go index 153d6073e..ffadccef8 100644 --- a/joinservice/internal/kubeadm/keymanager.go +++ b/joinservice/internal/kubeadm/keymanager.go @@ -5,6 +5,10 @@ import ( "sync" "time" + "github.com/edgelesssys/constellation/internal/constants" + "github.com/edgelesssys/constellation/internal/logger" + clientset "k8s.io/client-go/kubernetes" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/phases/copycerts" "k8s.io/utils/clock" ) @@ -18,11 +22,15 @@ type keyManager struct { key string expirationDate time.Time clock clock.Clock + client clientset.Interface + log *logger.Logger } -func newKeyManager() *keyManager { +func newKeyManager(client clientset.Interface, log *logger.Logger) *keyManager { return &keyManager{ - clock: clock.RealClock{}, + clock: clock.RealClock{}, + client: client, + log: log, } } @@ -45,6 +53,15 @@ func (k *keyManager) getCertificatetKey() (string, error) { } k.expirationDate = k.clock.Now().Add(certificateKeyTTL) k.key = key + k.log.Infof("Uploading certs to Kubernetes") + cfg := &kubeadmapi.InitConfiguration{ + ClusterConfiguration: kubeadmapi.ClusterConfiguration{ + CertificatesDir: constants.KubeadmCertificateDir, + }, + } + if err := copycerts.UploadCerts(k.client, cfg, key); err != nil { + return "", fmt.Errorf("uploading certs: %w", err) + } case k.expirationDate.After(k.clock.Now()): // key is still valid // if TTL is less than 2 minutes away, increase it by 2 minutes diff --git a/joinservice/internal/kubeadm/keymanager_test.go b/joinservice/internal/kubeadm/keymanager_test.go index c033a18f8..c524f5bd0 100644 --- a/joinservice/internal/kubeadm/keymanager_test.go +++ 
b/joinservice/internal/kubeadm/keymanager_test.go @@ -4,7 +4,12 @@ import ( "testing" "time" + "github.com/edgelesssys/constellation/internal/logger" "github.com/stretchr/testify/assert" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake" "k8s.io/utils/clock" testclock "k8s.io/utils/clock/testing" ) @@ -12,30 +17,43 @@ import ( func TestKeyManager(t *testing.T) { testCases := map[string]struct { clock clock.Clock + client clientset.Interface ttl time.Time key string shouldReuse bool + wantErr bool }{ "no key exists": { - clock: testclock.NewFakeClock(time.Time{}), + clock: testclock.NewFakeClock(time.Time{}), + client: fake.NewSimpleClientset(), }, "key exists and is valid": { clock: testclock.NewFakeClock(time.Time{}), + client: fake.NewSimpleClientset(), ttl: time.Time{}.Add(time.Hour), key: "key", shouldReuse: true, }, "key has expired": { - clock: testclock.NewFakeClock(time.Time{}.Add(time.Hour)), - ttl: time.Time{}, - key: "key", + clock: testclock.NewFakeClock(time.Time{}.Add(time.Hour)), + client: fake.NewSimpleClientset(), + ttl: time.Time{}, + key: "key", }, "key expires in the next 30 seconds": { clock: testclock.NewFakeClock(time.Time{}), + client: fake.NewSimpleClientset(), ttl: time.Time{}.Add(30 * time.Second), key: "key", shouldReuse: true, }, + "uploading certs fails": { + clock: testclock.NewFakeClock(time.Time{}), + client: &failingClient{ + fake.NewSimpleClientset(), + }, + wantErr: true, + }, } for name, tc := range testCases { @@ -46,9 +63,15 @@ func TestKeyManager(t *testing.T) { expirationDate: tc.ttl, key: tc.key, clock: tc.clock, + log: logger.NewTest(t), + client: tc.client, } key, err := km.getCertificatetKey() + if tc.wantErr { + assert.Error(err) + return + } assert.NoError(err) assert.True(km.expirationDate.After(tc.clock.Now().Add(2 * time.Minute))) @@ -61,3 +84,13 @@ func TestKeyManager(t
*testing.T) { }) } } + +type failingClient struct { + *fake.Clientset +} + +func (f *failingClient) CoreV1() corev1.CoreV1Interface { + return &failingCoreV1{ + &fakecorev1.FakeCoreV1{Fake: &f.Clientset.Fake}, + } +} diff --git a/joinservice/internal/kubeadm/kubeadm.go b/joinservice/internal/kubeadm/kubeadm.go index 1b2e6bcbc..73d13834c 100644 --- a/joinservice/internal/kubeadm/kubeadm.go +++ b/joinservice/internal/kubeadm/kubeadm.go @@ -16,10 +16,8 @@ import ( certutil "k8s.io/client-go/util/cert" bootstraputil "k8s.io/cluster-bootstrap/token/util" bootstraptoken "k8s.io/kubernetes/cmd/kubeadm/app/apis/bootstraptoken/v1" - kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" tokenphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node" - "k8s.io/kubernetes/cmd/kubeadm/app/phases/copycerts" "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" "k8s.io/kubernetes/cmd/kubeadm/app/util/pubkeypin" ) @@ -48,7 +46,7 @@ func New(apiServerEndpoint string, log *logger.Logger) (*Kubeadm, error) { return &Kubeadm{ apiServerEndpoint: apiServerEndpoint, log: log, - keyManager: newKeyManager(), + keyManager: newKeyManager(client, log), client: client, file: file, }, nil @@ -113,21 +111,10 @@ func (k *Kubeadm) GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscov // GetControlPlaneCertificateKey uploads Kubernetes encrypted CA certificates to Kubernetes and returns the decryption key. // The key can be used by new nodes to join the cluster as a control plane node. 
func (k *Kubeadm) GetControlPlaneCertificateKey() (string, error) { - k.log.Infof("Creating new random control plane certificate key") + k.log.Infof("Creating new random control plane certificate key (or returning cached key)") key, err := k.keyManager.getCertificatetKey() if err != nil { return "", fmt.Errorf("couldn't create control plane certificate key: %w", err) } - - k.log.Infof("Uploading certs to Kubernetes") - cfg := &kubeadmapi.InitConfiguration{ - ClusterConfiguration: kubeadmapi.ClusterConfiguration{ - CertificatesDir: constants.KubeadmCertificateDir, - }, - } - if err := copycerts.UploadCerts(k.client, cfg, key); err != nil { - return "", fmt.Errorf("uploading certs: %w", err) - } - return key, nil } diff --git a/joinservice/internal/kubeadm/kubeadm_test.go b/joinservice/internal/kubeadm/kubeadm_test.go index a93f2135a..ef33a1063 100644 --- a/joinservice/internal/kubeadm/kubeadm_test.go +++ b/joinservice/internal/kubeadm/kubeadm_test.go @@ -14,7 +14,6 @@ import ( "go.uber.org/goleak" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake" @@ -104,53 +103,6 @@ kind: Config`, } } -func TestGetControlPlaneCertificateKey(t *testing.T) { - testCases := map[string]struct { - wantErr bool - client clientset.Interface - }{ - "success": { - client: fake.NewSimpleClientset(), - wantErr: false, - }, - "failure": { - client: &failingClient{ - fake.NewSimpleClientset(), - }, - wantErr: true, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - assert := assert.New(t) - - client := &Kubeadm{ - keyManager: &keyManager{clock: testclock.NewFakeClock(time.Time{})}, - log: logger.NewTest(t), - client: tc.client, - } - - _, err := client.GetControlPlaneCertificateKey() - if tc.wantErr { - assert.Error(err) - } else { - assert.NoError(err) - } 
- }) - } -} - -type failingClient struct { - *fake.Clientset -} - -func (f *failingClient) CoreV1() corev1.CoreV1Interface { - return &failingCoreV1{ - &fakecorev1.FakeCoreV1{Fake: &f.Clientset.Fake}, - } -} - type failingCoreV1 struct { *fakecorev1.FakeCoreV1 } diff --git a/kms/kms/azure/hsm.go b/kms/kms/azure/hsm.go index 3b4d638e9..9e540c7e9 100644 --- a/kms/kms/azure/hsm.go +++ b/kms/kms/azure/hsm.go @@ -29,7 +29,7 @@ type cryptoClientAPI interface { WrapKey(ctx context.Context, alg crypto.KeyWrapAlgorithm, key []byte, options *crypto.WrapKeyOptions) (crypto.WrapKeyResponse, error) } -// Suffix for HSM Vaults. +// HSMDefaultCloud is the suffix for HSM Vaults. const HSMDefaultCloud VaultSuffix = ".managedhsm.azure.net/" // HSMClient implements the CloudKMS interface for Azure managed HSM. diff --git a/kms/setup/setup.go b/kms/setup/setup.go index 29f53ac13..cda1fd787 100644 --- a/kms/setup/setup.go +++ b/kms/setup/setup.go @@ -28,8 +28,8 @@ const ( ) type KMSInformation struct { - KmsUri string - StorageUri string + KMSURI string + StorageURI string KeyEncryptionKeyID string } diff --git a/kms/setup/setup_test.go b/kms/setup/setup_test.go index fd4ed6485..91d691a4c 100644 --- a/kms/setup/setup_test.go +++ b/kms/setup/setup_test.go @@ -187,7 +187,7 @@ func TestGetGCPKMSConfig(t *testing.T) { } func TestGetConfig(t *testing.T) { - const testUri = "test://config?name=test-name&data=test-data&value=test-value" + const testURI = "test://config?name=test-name&data=test-data&value=test-value" testCases := map[string]struct { uri string @@ -195,17 +195,17 @@ func TestGetConfig(t *testing.T) { wantErr bool }{ "success": { - uri: testUri, + uri: testURI, keys: []string{"name", "data", "value"}, wantErr: false, }, "less keys than capture groups": { - uri: testUri, + uri: testURI, keys: []string{"name", "data"}, wantErr: false, }, "invalid regex": { - uri: testUri, + uri: testURI, keys: []string{"name", "data", "test-value"}, wantErr: true, }, @@ -215,7 +215,7 @@ func 
TestGetConfig(t *testing.T) { wantErr: true, }, "more keys than expected": { - uri: testUri, + uri: testURI, keys: []string{"name", "data", "value", "anotherValue"}, wantErr: true, }, diff --git a/mount/cryptmapper/cryptmapper.go b/mount/cryptmapper/cryptmapper.go index e347a8346..e52a0af8a 100644 --- a/mount/cryptmapper/cryptmapper.go +++ b/mount/cryptmapper/cryptmapper.go @@ -84,7 +84,7 @@ type DeviceMapper interface { Resize(name string, newSize uint64) error } -// cryptDevice is a wrapper for cryptsetup.Device. +// CryptDevice is a wrapper for cryptsetup.Device. type CryptDevice struct { *cryptsetup.Device } @@ -192,7 +192,7 @@ func (c *CryptMapper) ResizeCryptDevice(ctx context.Context, volumeID string) (s return cryptPrefix + volumeID, nil } -// GetDeviceName returns the real device name of a mapped crypt device. +// GetDevicePath returns the device path of a mapped crypt device. func (c *CryptMapper) GetDevicePath(volumeID string) (string, error) { return getDevicePath(c.mapper, strings.TrimPrefix(volumeID, cryptPrefix)) } diff --git a/state/keyservice/keyservice.go b/state/keyservice/keyservice.go index 99631cce0..e61b7fc9a 100644 --- a/state/keyservice/keyservice.go +++ b/state/keyservice/keyservice.go @@ -44,7 +44,7 @@ func New(log *logger.Logger, issuer QuoteIssuer, metadata metadata.InstanceListe } } -// PushStateDiskKeyRequest is the rpc to push state disk decryption keys to a restarting node. +// PushStateDiskKey is the rpc to push state disk decryption keys to a restarting node. func (a *KeyAPI) PushStateDiskKey(ctx context.Context, in *keyproto.PushStateDiskKeyRequest) (*keyproto.PushStateDiskKeyResponse, error) { a.mux.Lock() defer a.mux.Unlock() diff --git a/state/mapper/mapper.go b/state/mapper/mapper.go index 21a3e166a..855512fd6 100644 --- a/state/mapper/mapper.go +++ b/state/mapper/mapper.go @@ -35,7 +35,7 @@ func (m *Mapper) IsLUKSDevice() bool { return m.device.Load(cryptsetup.LUKS2{}) == nil } -// GetUUID gets the device's UUID. 
+// DiskUUID gets the device's UUID. func (m *Mapper) DiskUUID() string { return strings.ToLower(m.device.GetUUID()) } diff --git a/terraform/libvirt/main.tf b/terraform/libvirt/main.tf index 376a26030..5567e31eb 100644 --- a/terraform/libvirt/main.tf +++ b/terraform/libvirt/main.tf @@ -5,7 +5,7 @@ terraform { version = "0.6.14" } docker = { - source = "kreuzwerker/docker" + source = "kreuzwerker/docker" version = "2.17.0" } } @@ -25,24 +25,24 @@ provider "docker" { } resource "docker_image" "qemu-metadata" { - name = "ghcr.io/edgelesssys/constellation/qemu-metadata-api:latest" - keep_locally = true + name = "ghcr.io/edgelesssys/constellation/qemu-metadata-api:feat-coordinator-selfactivation-node" + keep_locally = true } resource "docker_container" "qemu-metadata" { - name = "qemu-metadata" - image = docker_image.qemu-metadata.latest + name = "qemu-metadata" + image = docker_image.qemu-metadata.latest network_mode = "host" - rm = true + rm = true mounts { source = "/var/run/libvirt/libvirt-sock" target = "/var/run/libvirt/libvirt-sock" - type = "bind" + type = "bind" } mounts { source = var.metadata_api_log_dir target = "/pcrs" - type = "bind" + type = "bind" } }