Rename coordinator to bootstrapper and rename roles

Authored by katexochen on 2022-06-29 15:26:29 +02:00, committed by Paul Meyer
parent 3280ed200c
commit 916e5d6b55
191 changed files with 1763 additions and 2030 deletions


@@ -0,0 +1,209 @@
package initserver

import (
    "context"
    "fmt"
    "net"
    "strings"

    "github.com/edgelesssys/constellation/bootstrapper/initproto"
    "github.com/edgelesssys/constellation/bootstrapper/internal/diskencryption"
    "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes"
    "github.com/edgelesssys/constellation/bootstrapper/internal/nodelock"
    "github.com/edgelesssys/constellation/bootstrapper/nodestate"
    "github.com/edgelesssys/constellation/bootstrapper/role"
    "github.com/edgelesssys/constellation/bootstrapper/util"
    attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
    "github.com/edgelesssys/constellation/internal/constants"
    "github.com/edgelesssys/constellation/internal/file"
    grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
    grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
    grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags"
    "go.uber.org/zap"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// Server is the initialization server, which is started on each node.
// The server handles initialization calls from the CLI and initializes the
// Kubernetes cluster.
type Server struct {
    nodeLock    *nodelock.Lock
    initializer ClusterInitializer
    disk        encryptedDisk
    fileHandler file.Handler
    grpcServer  serveStopper
    logger      *zap.Logger

    initproto.UnimplementedAPIServer
}

// New creates a new initialization server.
func New(lock *nodelock.Lock, kube ClusterInitializer, logger *zap.Logger) *Server {
    logger = logger.Named("initServer")

    server := &Server{
        nodeLock:    lock,
        disk:        diskencryption.New(),
        initializer: kube,
        logger:      logger,
    }

    grpcLogger := logger.Named("gRPC")
    grpcServer := grpc.NewServer(
        grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
            grpc_ctxtags.StreamServerInterceptor(),
            grpc_zap.StreamServerInterceptor(grpcLogger),
        )),
        grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
            grpc_ctxtags.UnaryServerInterceptor(),
            grpc_zap.UnaryServerInterceptor(grpcLogger),
        )),
    )
    initproto.RegisterAPIServer(grpcServer, server)

    server.grpcServer = grpcServer

    return server
}

func (s *Server) Serve(ip, port string) error {
    lis, err := net.Listen("tcp", net.JoinHostPort(ip, port))
    if err != nil {
        return fmt.Errorf("failed to listen: %w", err)
    }
    return s.grpcServer.Serve(lis)
}

// Init initializes the cluster.
func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initproto.InitResponse, error) {
    if ok := s.nodeLock.TryLockOnce(); !ok {
        // The join client seems to already have a connection to an
        // existing join service. At this point, any further call to
        // init does not make sense, so we just stop.
        //
        // The server stops itself after the current call is done.
        go s.grpcServer.GracefulStop()
        return nil, status.Error(codes.FailedPrecondition, "node is already being activated")
    }

    id, err := s.deriveAttestationID(req.MasterSecret)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "%s", err)
    }

    if err := s.setupDisk(req.MasterSecret); err != nil {
        return nil, status.Errorf(codes.Internal, "setting up disk: %s", err)
    }

    state := nodestate.NodeState{
        Role:      role.ControlPlane,
        OwnerID:   id.Owner,
        ClusterID: id.Cluster,
    }
    if err := state.ToFile(s.fileHandler); err != nil {
        return nil, status.Errorf(codes.Internal, "persisting node state: %s", err)
    }

    kubeconfig, err := s.initializer.InitCluster(ctx,
        req.AutoscalingNodeGroups,
        req.CloudServiceAccountUri,
        req.KubernetesVersion,
        id,
        kubernetes.KMSConfig{
            MasterSecret:       req.MasterSecret,
            KMSURI:             req.KmsUri,
            StorageURI:         req.StorageUri,
            KeyEncryptionKeyID: req.KeyEncryptionKeyId,
            UseExistingKEK:     req.UseExistingKek,
        },
        sshProtoKeysToMap(req.SshUserKeys),
    )
    if err != nil {
        return nil, status.Errorf(codes.Internal, "initializing cluster: %s", err)
    }

    return &initproto.InitResponse{
        Kubeconfig: kubeconfig,
        OwnerId:    id.Owner,
        ClusterId:  id.Cluster,
    }, nil
}

func (s *Server) setupDisk(masterSecret []byte) error {
    if err := s.disk.Open(); err != nil {
        return fmt.Errorf("opening encrypted disk: %w", err)
    }
    defer s.disk.Close()

    uuid, err := s.disk.UUID()
    if err != nil {
        return fmt.Errorf("retrieving uuid of disk: %w", err)
    }
    uuid = strings.ToLower(uuid)

    // TODO: Choose a way to salt the key derivation
    diskKey, err := util.DeriveKey(masterSecret, []byte("Constellation"), []byte("key"+uuid), 32)
    if err != nil {
        return err
    }

    return s.disk.UpdatePassphrase(string(diskKey))
}

func (s *Server) deriveAttestationID(masterSecret []byte) (attestationtypes.ID, error) {
    clusterID, err := util.GenerateRandomBytes(constants.RNGLengthDefault)
    if err != nil {
        return attestationtypes.ID{}, err
    }

    // TODO: Choose a way to salt the key derivation
    ownerID, err := util.DeriveKey(masterSecret, []byte("Constellation"), []byte("id"), constants.RNGLengthDefault)
    if err != nil {
        return attestationtypes.ID{}, err
    }

    return attestationtypes.ID{Owner: ownerID, Cluster: clusterID}, nil
}

func sshProtoKeysToMap(keys []*initproto.SSHUserKey) map[string]string {
    keyMap := make(map[string]string)
    for _, key := range keys {
        keyMap[key.Username] = key.PublicKey
    }
    return keyMap
}

// ClusterInitializer has the ability to initialize a cluster.
type ClusterInitializer interface {
    // InitCluster initializes a new Kubernetes cluster.
    InitCluster(
        ctx context.Context,
        autoscalingNodeGroups []string,
        cloudServiceAccountURI string,
        k8sVersion string,
        id attestationtypes.ID,
        kmsConfig kubernetes.KMSConfig,
        sshUserKeys map[string]string,
    ) ([]byte, error)
}

type encryptedDisk interface {
    // Open prepares the underlying device for disk operations.
    Open() error
    // Close closes the underlying device.
    Close() error
    // UUID gets the device's UUID.
    UUID() (string, error)
    // UpdatePassphrase switches the initial random passphrase of the encrypted disk to a permanent passphrase.
    UpdatePassphrase(passphrase string) error
}

type serveStopper interface {
    // Serve starts the server.
    Serve(lis net.Listener) error
    // GracefulStop stops the server and blocks until all requests are done.
    GracefulStop()
}
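
For orientation, here is a minimal wiring sketch of how this server could be constructed and started. It is not part of the commit: stubInitializer stands in for the bootstrapper's real Kubernetes initializer, and the listen address and port are invented for illustration.

// Sketch only: wiring the init server into a main function. stubInitializer is a
// placeholder for the real ClusterInitializer; address and port are made up.
package main

import (
    "context"

    "github.com/edgelesssys/constellation/bootstrapper/internal/initserver"
    "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes"
    "github.com/edgelesssys/constellation/bootstrapper/internal/nodelock"
    attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
    "go.uber.org/zap"
)

type stubInitializer struct{}

func (stubInitializer) InitCluster(
    context.Context, []string, string, string,
    attestationtypes.ID, kubernetes.KMSConfig, map[string]string,
) ([]byte, error) {
    return []byte("kubeconfig"), nil
}

func main() {
    logger, err := zap.NewDevelopment()
    if err != nil {
        panic(err)
    }

    // The first successful Init call takes the node lock; any later call is
    // rejected with FailedPrecondition and gracefully stops the server.
    server := initserver.New(nodelock.New(), stubInitializer{}, logger)

    // Serve blocks until the underlying gRPC server is stopped.
    if err := server.Serve("0.0.0.0", "9000"); err != nil {
        logger.Fatal("init server stopped", zap.Error(err))
    }
}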


@@ -0,0 +1,238 @@
package initserver

import (
    "context"
    "errors"
    "net"
    "testing"
    "time"

    "github.com/edgelesssys/constellation/bootstrapper/initproto"
    "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes"
    "github.com/edgelesssys/constellation/bootstrapper/internal/nodelock"
    attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
    "github.com/edgelesssys/constellation/internal/file"
    "github.com/spf13/afero"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "go.uber.org/goleak"
    "go.uber.org/zap"
    "go.uber.org/zap/zaptest"
)

func TestMain(m *testing.M) {
    goleak.VerifyTestMain(m)
}

func TestNew(t *testing.T) {
    assert := assert.New(t)

    server := New(nodelock.New(), &stubClusterInitializer{}, zap.NewNop())
    assert.NotNil(server)
    assert.NotNil(server.logger)
    assert.NotNil(server.nodeLock)
    assert.NotNil(server.initializer)
    assert.NotNil(server.grpcServer)
    assert.NotNil(server.fileHandler)
    assert.NotNil(server.disk)
}

func TestInit(t *testing.T) {
    someErr := errors.New("failed")
    lockedNodeLock := nodelock.New()
    require.True(t, lockedNodeLock.TryLockOnce())

    testCases := map[string]struct {
        nodeLock     *nodelock.Lock
        initializer  ClusterInitializer
        disk         encryptedDisk
        fileHandler  file.Handler
        req          *initproto.InitRequest
        wantErr      bool
        wantShutdown bool
    }{
        "successful init": {
            nodeLock:    nodelock.New(),
            initializer: &stubClusterInitializer{},
            disk:        &stubDisk{},
            fileHandler: file.NewHandler(afero.NewMemMapFs()),
            req:         &initproto.InitRequest{},
        },
        "node locked": {
            nodeLock:     lockedNodeLock,
            initializer:  &stubClusterInitializer{},
            disk:         &stubDisk{},
            fileHandler:  file.NewHandler(afero.NewMemMapFs()),
            req:          &initproto.InitRequest{},
            wantErr:      true,
            wantShutdown: true,
        },
        "disk open error": {
            nodeLock:    nodelock.New(),
            initializer: &stubClusterInitializer{},
            disk:        &stubDisk{openErr: someErr},
            fileHandler: file.NewHandler(afero.NewMemMapFs()),
            req:         &initproto.InitRequest{},
            wantErr:     true,
        },
        "disk uuid error": {
            nodeLock:    nodelock.New(),
            initializer: &stubClusterInitializer{},
            disk:        &stubDisk{uuidErr: someErr},
            fileHandler: file.NewHandler(afero.NewMemMapFs()),
            req:         &initproto.InitRequest{},
            wantErr:     true,
        },
        "disk update passphrase error": {
            nodeLock:    nodelock.New(),
            initializer: &stubClusterInitializer{},
            disk:        &stubDisk{updatePassphraseErr: someErr},
            fileHandler: file.NewHandler(afero.NewMemMapFs()),
            req:         &initproto.InitRequest{},
            wantErr:     true,
        },
        "write state file error": {
            nodeLock:    nodelock.New(),
            initializer: &stubClusterInitializer{},
            disk:        &stubDisk{},
            fileHandler: file.NewHandler(afero.NewReadOnlyFs(afero.NewMemMapFs())),
            req:         &initproto.InitRequest{},
            wantErr:     true,
        },
        "initialize cluster error": {
            nodeLock:    nodelock.New(),
            initializer: &stubClusterInitializer{initClusterErr: someErr},
            disk:        &stubDisk{},
            fileHandler: file.NewHandler(afero.NewMemMapFs()),
            req:         &initproto.InitRequest{},
            wantErr:     true,
        },
    }

    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            assert := assert.New(t)

            serveStopper := newStubServeStopper()
            server := &Server{
                nodeLock:    tc.nodeLock,
                initializer: tc.initializer,
                disk:        tc.disk,
                fileHandler: tc.fileHandler,
                logger:      zaptest.NewLogger(t),
                grpcServer:  serveStopper,
            }

            kubeconfig, err := server.Init(context.Background(), tc.req)

            if tc.wantErr {
                assert.Error(err)

                if tc.wantShutdown {
                    select {
                    case <-serveStopper.shutdownCalled:
                    case <-time.After(time.Second):
                        t.Fatal("grpc server did not shut down")
                    }
                }
                return
            }

            assert.NoError(err)
            assert.NotNil(kubeconfig)
            assert.False(server.nodeLock.TryLockOnce()) // lock should be locked
        })
    }
}

func TestSSHProtoKeysToMap(t *testing.T) {
    testCases := map[string]struct {
        keys []*initproto.SSHUserKey
        want map[string]string
    }{
        "empty": {
            keys: []*initproto.SSHUserKey{},
            want: map[string]string{},
        },
        "one key": {
            keys: []*initproto.SSHUserKey{
                {Username: "key1", PublicKey: "key1-key"},
            },
            want: map[string]string{
                "key1": "key1-key",
            },
        },
        "two keys": {
            keys: []*initproto.SSHUserKey{
                {Username: "key1", PublicKey: "key1-key"},
                {Username: "key2", PublicKey: "key2-key"},
            },
            want: map[string]string{
                "key1": "key1-key",
                "key2": "key2-key",
            },
        },
    }

    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            assert := assert.New(t)

            got := sshProtoKeysToMap(tc.keys)
            assert.Equal(tc.want, got)
        })
    }
}

type stubDisk struct {
    openErr                error
    closeErr               error
    uuid                   string
    uuidErr                error
    updatePassphraseErr    error
    updatePassphraseCalled bool
}

func (d *stubDisk) Open() error {
    return d.openErr
}

func (d *stubDisk) Close() error {
    return d.closeErr
}

func (d *stubDisk) UUID() (string, error) {
    return d.uuid, d.uuidErr
}

func (d *stubDisk) UpdatePassphrase(string) error {
    d.updatePassphraseCalled = true
    return d.updatePassphraseErr
}

type stubClusterInitializer struct {
    initClusterKubeconfig []byte
    initClusterErr        error
}

func (i *stubClusterInitializer) InitCluster(
    context.Context, []string, string, string,
    attestationtypes.ID, kubernetes.KMSConfig, map[string]string,
) ([]byte, error) {
    return i.initClusterKubeconfig, i.initClusterErr
}

type stubServeStopper struct {
    shutdownCalled chan struct{}
}

func newStubServeStopper() *stubServeStopper {
    return &stubServeStopper{shutdownCalled: make(chan struct{}, 1)}
}

func (s *stubServeStopper) Serve(net.Listener) error {
    panic("should not be called in a test")
}

func (s *stubServeStopper) GracefulStop() {
    s.shutdownCalled <- struct{}{}
}
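
To close the loop, a client-side sketch of the Init RPC these tests exercise. It is likewise not part of the commit: the constructor name initproto.NewAPIClient is assumed from the generated gRPC code for the API service, and the address, insecure credentials, and request values are placeholders rather than how the Constellation CLI actually performs this call.

// Sketch only: invoking the Init RPC from a client. Insecure transport
// credentials and hard-coded values are for illustration, never for real use.
package main

import (
    "context"
    "log"

    "github.com/edgelesssys/constellation/bootstrapper/initproto"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    conn, err := grpc.Dial("198.51.100.10:9000", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        log.Fatalf("dialing init server: %v", err)
    }
    defer conn.Close()

    client := initproto.NewAPIClient(conn) // assumed generated constructor
    resp, err := client.Init(context.Background(), &initproto.InitRequest{
        MasterSecret: []byte("placeholder-master-secret"),
        SshUserKeys: []*initproto.SSHUserKey{
            {Username: "admin", PublicKey: "ssh-ed25519 AAAA... placeholder"},
        },
    })
    if err != nil {
        log.Fatalf("init call failed: %v", err)
    }

    // A second Init against the same node would return FailedPrecondition,
    // since the node lock is already taken.
    log.Printf("cluster %x initialized, received %d bytes of kubeconfig", resp.ClusterId, len(resp.Kubeconfig))
}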