From 2023edaef0c2cdd0c73ce2c234236f7fea0a1652 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Daniel=20Wei=C3=9Fe?= <66256922+daniel-weisse@users.noreply.github.com>
Date: Fri, 3 Mar 2023 16:50:01 +0100
Subject: [PATCH] bootstrapper: stop join-client earlier (#1268)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Daniel Weiße
---
 bootstrapper/internal/initserver/initserver.go      | 6 +++++-
 bootstrapper/internal/initserver/initserver_test.go | 7 ++++++-
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/bootstrapper/internal/initserver/initserver.go b/bootstrapper/internal/initserver/initserver.go
index b84edd6e8..5f44f1756 100644
--- a/bootstrapper/internal/initserver/initserver.go
+++ b/bootstrapper/internal/initserver/initserver.go
@@ -113,7 +113,6 @@ func (s *Server) Serve(ip, port string, cleaner cleaner) error {
 
 // Init initializes the cluster.
 func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initproto.InitResponse, error) {
-	defer s.cleaner.Clean()
 	log := s.log.With(zap.String("peer", grpclog.PeerAddrFromContext(ctx)))
 	log.Infof("Init called")
 
@@ -146,6 +145,11 @@ func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initpro
 		return nil, status.Error(codes.FailedPrecondition, "node is already being activated")
 	}
 
+	// Stop the join client -> We no longer expect to join an existing cluster,
+	// since we are bootstrapping a new one.
+	// Any errors following this call will result in a failed node that may not join any cluster.
+	s.cleaner.Clean()
+
 	if err := s.setupDisk(ctx, cloudKms); err != nil {
 		return nil, status.Errorf(codes.Internal, "setting up disk: %s", err)
 	}
diff --git a/bootstrapper/internal/initserver/initserver_test.go b/bootstrapper/internal/initserver/initserver_test.go
index 4347ef9bc..1d974b1e6 100644
--- a/bootstrapper/internal/initserver/initserver_test.go
+++ b/bootstrapper/internal/initserver/initserver_test.go
@@ -110,6 +110,7 @@ func TestInit(t *testing.T) {
 			fileHandler:    file.NewHandler(afero.NewMemMapFs()),
 			initSecretHash: initSecretHash,
 			req:            &initproto.InitRequest{InitSecret: initSecret, KmsUri: masterSecret.EncodeToURI(), StorageUri: uri.NoStoreURI},
+			wantShutdown:   true,
 		},
 		"node locked": {
 			nodeLock:       lockedLock,
@@ -119,7 +120,6 @@
 			req:            &initproto.InitRequest{InitSecret: initSecret, KmsUri: masterSecret.EncodeToURI(), StorageUri: uri.NoStoreURI},
 			initSecretHash: initSecretHash,
 			wantErr:        true,
-			wantShutdown:   true,
 		},
 		"disk open error": {
 			nodeLock:       newFakeLock(),
@@ -129,6 +129,7 @@
 			req:            &initproto.InitRequest{InitSecret: initSecret, KmsUri: masterSecret.EncodeToURI(), StorageUri: uri.NoStoreURI},
 			initSecretHash: initSecretHash,
 			wantErr:        true,
+			wantShutdown:   true,
 		},
 		"disk uuid error": {
 			nodeLock:       newFakeLock(),
@@ -138,6 +139,7 @@
 			req:            &initproto.InitRequest{InitSecret: initSecret, KmsUri: masterSecret.EncodeToURI(), StorageUri: uri.NoStoreURI},
 			initSecretHash: initSecretHash,
 			wantErr:        true,
+			wantShutdown:   true,
 		},
 		"disk update passphrase error": {
 			nodeLock:       newFakeLock(),
@@ -147,6 +149,7 @@
 			req:            &initproto.InitRequest{InitSecret: initSecret, KmsUri: masterSecret.EncodeToURI(), StorageUri: uri.NoStoreURI},
 			initSecretHash: initSecretHash,
 			wantErr:        true,
+			wantShutdown:   true,
 		},
 		"write state file error": {
 			nodeLock:       newFakeLock(),
@@ -156,6 +159,7 @@
 			req:            &initproto.InitRequest{InitSecret: initSecret, KmsUri: masterSecret.EncodeToURI(), StorageUri: uri.NoStoreURI},
 			initSecretHash: initSecretHash,
 			wantErr:        true,
+			wantShutdown:   true,
 		},
 		"initialize cluster error": {
 			nodeLock:       newFakeLock(),
@@ -165,6 +169,7 @@
 			req:            &initproto.InitRequest{InitSecret: initSecret, KmsUri: masterSecret.EncodeToURI(), StorageUri: uri.NoStoreURI},
 			initSecretHash: initSecretHash,
 			wantErr:        true,
+			wantShutdown:   true,
 		},
 		"wrong initSecret": {
 			nodeLock:       newFakeLock(),
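
For illustration, a minimal, self-contained Go sketch of the control flow this patch establishes. This is not the actual Constellation code: the cleaner interface and the comment mirror the diff above, while server, initNode, joinClientStopper, and the boolean lock are hypothetical stand-ins for the init server, Init, the join client, and the node lock. The point it demonstrates: the join client keeps running until the node lock is acquired, so a node that loses the init race can still join an existing cluster, and Clean runs only once the node is committed to bootstrapping a new one.

	package main

	import (
		"errors"
		"fmt"
	)

	// cleaner mirrors the interface used by the init server in the diff above:
	// Clean stops the concurrently running join client.
	type cleaner interface {
		Clean()
	}

	// server is a hypothetical stand-in for the bootstrapper's init server.
	type server struct {
		cleaner cleaner
		locked  bool // hypothetical stand-in for the node lock
	}

	// initNode sketches the reordered Init flow.
	func (s *server) initNode() error {
		// Before the patch, the equivalent of `defer s.cleaner.Clean()` ran
		// here, stopping the join client even when Init bailed out early.

		if s.locked {
			// Another flow owns this node; leave the join client running so
			// the node can still join the existing cluster.
			return errors.New("node is already being activated")
		}
		s.locked = true

		// After the patch: stop the join client only now. We no longer expect
		// to join an existing cluster, since we are bootstrapping a new one.
		s.cleaner.Clean()

		// Disk setup, state file writing, and cluster initialization follow
		// here; any failure from this point on leaves a failed node.
		return nil
	}

	// joinClientStopper is a hypothetical cleaner implementation for the demo.
	type joinClientStopper struct{ stopped bool }

	func (j *joinClientStopper) Clean() {
		j.stopped = true
		fmt.Println("join client stopped")
	}

	func main() {
		s := &server{cleaner: &joinClientStopper{}}
		if err := s.initNode(); err != nil {
			fmt.Println("init failed:", err)
			return
		}
		fmt.Println("bootstrapping new cluster")
	}

This ordering is what the visible wantShutdown changes in the test table reflect: the "node locked" case, which fails before the lock is acquired, no longer expects the join client to be shut down, while the success case and the cases that fail after the lock ("disk open error", "disk uuid error", "disk update passphrase error", "write state file error", "initialize cluster error") now do.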