mirror of https://github.com/edgelesssys/constellation.git (synced 2024-12-25 07:29:38 -05:00)

commit 260d2571c1 (parent 586b65f089)

Only upload kubeadm certs if key is rotated

Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com>
Co-authored-by: 3u13r <lc@edgeless.systems>
.github/workflows/build-bootstrapper.yml (vendored, 4 changed lines)

@@ -1,4 +1,4 @@
-# We build the coordinator as part of each PR to see that the build still works. An image is only created once merged to main (see condition on call-coreos).
+# We build the bootstrapper as part of each PR to see that the build still works. An image is only created once merged to main (see condition on call-coreos).
 name: Build and Upload the bootstrapper

 on:
@@ -45,7 +45,7 @@ jobs:

 - name: Copy bootstrapper to S3 if not exists
   id: copy
-  # Only upload the Coordinator if this action is triggered from main branch
+  # Only upload the bootstrapper if this action is triggered from main branch
   if: ${{ github.ref == 'refs/heads/main' }}
   run: >
     aws s3api head-object --bucket ${{ secrets.PUBLIC_BUCKET_NAME }} --key bootstrapper/$(ls | grep "bootstrapper-")
@@ -20,12 +20,11 @@ calling the InitCluster function of our Kubernetes library, which does a `kubead
 ## Join Flow

 The JoinClient is a gRPC client that is trying to connect to an JoinService, which might be running
-in an already existing cluster as DaemonSet. If the JoinClient can connect to the JoinService, it tries
-to issue a join ticket. The JoinService is validating the instance which wants to join the cluster using
+in an already existing cluster as DaemonSet. The JoinService is validating the instance which wants to join the cluster using
 aTLS. For details on the used protocol and the verification of a joining instances measurements, see the
 [joinservice](./../joinservice) package.

-If the JOinSerivce successfully verifies the instance, it issues a join ticket. The JoinClient then
+If the JoinService successfully verifies the instance, it issues a join ticket. The JoinClient then
 joins the cluster by calling the `kubeadm join` command, using the token and other needed information
 from the join ticket.

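The flow described in the README hunk above can be condensed into a short, self-contained sketch. It is pieced together from this commit's JoinClient and kubeadm changes; the joinTicket struct and the joinWithTicket function are stand-ins for illustration, not real types from the repository.

```go
package sketch

import (
	"fmt"
	"os/exec"
)

// joinTicket mirrors the fields this commit reads from the issued join
// ticket (OwnerId/ClusterId) and from kubeadm's bootstrap token discovery
// (endpoint, token, CA cert hash); the struct itself is hypothetical.
type joinTicket struct {
	APIServerEndpoint string
	Token             string
	CACertHash        string
	OwnerID           []byte
	ClusterID         []byte
}

// locker matches the interface this commit introduces in the join client.
type locker interface {
	TryLockOnce(ownerID, clusterID []byte) (bool, error)
}

func joinWithTicket(lock locker, t joinTicket) error {
	// The node lock is now keyed on the owner and cluster IDs from the ticket.
	acquired, err := lock.TryLockOnce(t.OwnerID, t.ClusterID)
	if err != nil {
		return fmt.Errorf("acquiring node lock: %w", err)
	}
	if !acquired {
		// Cluster initialization is already in progress on this node; the
		// initializing node is part of the cluster anyway, so don't join.
		return nil
	}
	// The real client builds a kubeadm join configuration; shelling out here
	// only illustrates which ticket fields feed into `kubeadm join`.
	return exec.Command("kubeadm", "join", t.APIServerEndpoint,
		"--token", t.Token,
		"--discovery-token-ca-cert-hash", t.CACertHash).Run()
}
```
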
@@ -98,7 +98,7 @@ func (c *CloudControllerManager) Secrets(ctx context.Context, providerID string,
 SecurityGroupName: securityGroupName,
 LoadBalancerName: loadBalancerName,
 UseInstanceMetadata: true,
-VmType: vmType,
+VMType: vmType,
 Location: creds.Location,
 AADClientID: creds.ClientID,
 AADClientSecret: creds.ClientSecret,
@@ -177,7 +177,7 @@ type cloudConfig struct {
 VNetResourceGroup string `json:"vnetResourceGroup,omitempty"`
 CloudProviderBackoff bool `json:"cloudProviderBackoff,omitempty"`
 UseInstanceMetadata bool `json:"useInstanceMetadata,omitempty"`
-VmType string `json:"vmType,omitempty"`
+VMType string `json:"vmType,omitempty"`
 AADClientID string `json:"aadClientId,omitempty"`
 AADClientSecret string `json:"aadClientSecret,omitempty"`
 }
@@ -3,6 +3,7 @@ package main
 import (
 "net"

+"github.com/edgelesssys/constellation/bootstrapper/internal/exit"
 "github.com/edgelesssys/constellation/bootstrapper/internal/initserver"
 "github.com/edgelesssys/constellation/bootstrapper/internal/joinclient"
 "github.com/edgelesssys/constellation/bootstrapper/internal/logging"
@@ -39,19 +40,29 @@ func run(issuer quoteIssuer, tpm vtpm.TPMOpenFunc, fileHandler file.Handler,
 return
 }

-nodeLock := nodelock.New()
+nodeLock := nodelock.New(tpm)
 initServer := initserver.New(nodeLock, kube, issuer, fileHandler, logger)

 dialer := dialer.New(issuer, nil, &net.Dialer{})
 joinClient := joinclient.New(nodeLock, dialer, kube, metadata, logger)

-joinClient.Start()
+cleaner := exit.New().
+With(initServer).
+With(joinClient)
+joinClient.Start(cleaner)

-if err := initServer.Serve(bindIP, bindPort); err != nil {
+if err := initServer.Serve(bindIP, bindPort, cleaner); err != nil {
 logger.Error("Failed to serve init server", zap.Error(err))
 }

-joinClient.Stop()
+// wait for join client and server to exit cleanly
+cleaner.Clean()
+
+// if node lock was never acquired, then we didn't bootstrap successfully.
+if !nodeLock.Locked() {
+cloudLogger.Disclose("bootstrapper failed")
+logger.Fatal("bootstrapper failed")
+}

 logger.Info("bootstrapper done")
 cloudLogger.Disclose("bootstrapper done")
bootstrapper/internal/exit/exit.go (new file, 50 lines)

@@ -0,0 +1,50 @@
+package exit
+
+import (
+"sync"
+)
+
+type cleaner struct {
+stoppers []stopper
+
+cleanupDone bool
+wg sync.WaitGroup
+mux sync.Mutex
+}
+
+// New creates a new cleaner.
+func New(stoppers ...stopper) *cleaner {
+return &cleaner{
+stoppers: stoppers,
+}
+}
+
+// With adds a new stopper to the cleaner.
+func (c *cleaner) With(stopper stopper) *cleaner {
+c.stoppers = append(c.stoppers, stopper)
+return c
+}
+
+// Clean stops all services gracefully.
+func (c *cleaner) Clean() {
+// only cleanup once
+c.mux.Lock()
+defer c.mux.Unlock()
+if c.cleanupDone {
+return
+}
+
+c.wg.Add(len(c.stoppers))
+for _, stopItem := range c.stoppers {
+go func(stopItem stopper) {
+stopItem.Stop()
+c.wg.Done()
+}(stopItem)
+}
+c.wg.Wait()
+c.cleanupDone = true
+}
+
+type stopper interface {
+Stop()
+}
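A minimal usage sketch of the cleaner defined above, mirroring the wiring in the bootstrapper's run function earlier in this diff. The server and client types are stand-ins, and since exit is an internal package, this only compiles from inside the bootstrapper module.

```go
package main

import "github.com/edgelesssys/constellation/bootstrapper/internal/exit"

// server and client stand in for initserver.Server and joinclient.JoinClient;
// both satisfy the unexported stopper interface by providing Stop().
type server struct{}

func (s *server) Stop() { /* gracefully stop serving */ }

type client struct{}

func (c *client) Stop() { /* stop the join routine and wait for it */ }

func main() {
	srv := &server{}
	cl := &client{}

	// Register everything that must be shut down when either path finishes.
	cleaner := exit.New().
		With(srv).
		With(cl)

	// ... start srv and cl ...

	// Clean stops all registered stoppers concurrently and waits for them.
	// It is guarded by a mutex and a cleanupDone flag, so both the init and
	// the join path may call it; only the first call does any work.
	cleaner.Clean()
}
```
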
bootstrapper/internal/exit/exit_test.go (new file, 47 lines)

@@ -0,0 +1,47 @@
+package exit
+
+import (
+"testing"
+
+"github.com/stretchr/testify/assert"
+"go.uber.org/goleak"
+)
+
+func TestMain(m *testing.M) {
+goleak.VerifyTestMain(m)
+}
+
+func TestNew(t *testing.T) {
+assert := assert.New(t)
+
+cleaner := New(&spyStopper{})
+assert.NotNil(cleaner)
+assert.NotEmpty(cleaner.stoppers)
+}
+
+func TestWith(t *testing.T) {
+assert := assert.New(t)
+
+cleaner := New().With(&spyStopper{})
+assert.NotEmpty(cleaner.stoppers)
+}
+
+func TestClean(t *testing.T) {
+assert := assert.New(t)
+
+stopper := &spyStopper{}
+cleaner := New(stopper)
+cleaner.Clean()
+assert.True(stopper.stopped)
+
+// call again to make sure it doesn't panic or block
+cleaner.Clean()
+}
+
+type spyStopper struct {
+stopped bool
+}
+
+func (s *spyStopper) Stop() {
+s.stopped = true
+}
@@ -9,7 +9,6 @@ import (
 "github.com/edgelesssys/constellation/bootstrapper/initproto"
 "github.com/edgelesssys/constellation/bootstrapper/internal/diskencryption"
 "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes"
-"github.com/edgelesssys/constellation/bootstrapper/internal/nodelock"
 "github.com/edgelesssys/constellation/bootstrapper/nodestate"
 "github.com/edgelesssys/constellation/bootstrapper/role"
 "github.com/edgelesssys/constellation/bootstrapper/util"
@@ -31,7 +30,7 @@ import (
 // The server handles initialization calls from the CLI and initializes the
 // Kubernetes cluster.
 type Server struct {
-nodeLock *nodelock.Lock
+nodeLock locker
 initializer ClusterInitializer
 disk encryptedDisk
 fileHandler file.Handler
@@ -43,7 +42,7 @@ type Server struct {
 }

 // New creates a new initialization server.
-func New(lock *nodelock.Lock, kube ClusterInitializer, issuer atls.Issuer, fh file.Handler, logger *zap.Logger) *Server {
+func New(lock locker, kube ClusterInitializer, issuer atls.Issuer, fh file.Handler, logger *zap.Logger) *Server {
 logger = logger.Named("initServer")
 server := &Server{
 nodeLock: lock,
@@ -69,20 +68,32 @@ func New(lock *nodelock.Lock, kube ClusterInitializer, issuer atls.Issuer, fh fi
 return server
 }

-func (s *Server) Serve(ip, port string) error {
+// Serve starts the initialization server.
+func (s *Server) Serve(ip, port string, cleaner cleaner) error {
 lis, err := net.Listen("tcp", net.JoinHostPort(ip, port))
 if err != nil {
 return fmt.Errorf("failed to listen: %w", err)
 }

-return s.grpcServer.Serve(lis)
+err = s.grpcServer.Serve(lis)
+cleaner.Clean()
+return err
 }

 // Init initializes the cluster.
 func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initproto.InitResponse, error) {
 s.logger.Info("Init called")

-if ok := s.nodeLock.TryLockOnce(); !ok {
+id, err := s.deriveAttestationID(req.MasterSecret)
+if err != nil {
+return nil, status.Errorf(codes.Internal, "%s", err)
+}
+
+nodeLockAcquired, err := s.nodeLock.TryLockOnce(id.Owner, id.Cluster)
+if err != nil {
+return nil, status.Errorf(codes.Internal, "locking node: %s", err)
+}
+if !nodeLockAcquired {
 // The join client seems to already have a connection to an
 // existing join service. At this point, any further call to
 // init does not make sense, so we just stop.
@@ -93,11 +104,6 @@ func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initpro
 return nil, status.Error(codes.FailedPrecondition, "node is already being activated")
 }

-id, err := s.deriveAttestationID(req.MasterSecret)
-if err != nil {
-return nil, status.Errorf(codes.Internal, "%s", err)
-}
-
 if err := s.setupDisk(req.MasterSecret); err != nil {
 return nil, status.Errorf(codes.Internal, "setting up disk: %s", err)
 }
@@ -131,6 +137,7 @@ func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initpro
 }

 s.logger.Info("Init succeeded")
+go s.grpcServer.GracefulStop()
 return &initproto.InitResponse{
 Kubeconfig: kubeconfig,
 OwnerId: id.Owner,
@@ -138,6 +145,11 @@ func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initpro
 }, nil
 }

+// Stop stops the initialization server gracefully.
+func (s *Server) Stop() {
+s.grpcServer.GracefulStop()
+}
+
 func (s *Server) setupDisk(masterSecret []byte) error {
 if err := s.disk.Open(); err != nil {
 return fmt.Errorf("opening encrypted disk: %w", err)
@@ -214,3 +226,13 @@ type serveStopper interface {
 // GracefulStop stops the server and blocks until all requests are done.
 GracefulStop()
 }
+
+type locker interface {
+// TryLockOnce tries to lock the node. If the node is already locked, it
+// returns false. If the node is unlocked, it locks it and returns true.
+TryLockOnce(ownerID, clusterID []byte) (bool, error)
+}
+
+type cleaner interface {
+Clean()
+}
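Two things are worth calling out in the initserver changes above. First, the attestation ID is now derived before the node lock is taken, because TryLockOnce needs the owner and cluster IDs (the lock persists them to the TPM; see the nodelock changes later in this diff). Second, a successful Init stops the gRPC server in a separate goroutine so the response can still reach the caller; Serve then returns and runs the cleaner. Below is a hedged, self-contained analog of that shutdown hand-off with stand-in types instead of the real gRPC server.

```go
package sketch

import "sync"

type cleaner interface{ Clean() }

// miniServer stands in for initserver.Server plus its gRPC server.
type miniServer struct {
	done chan struct{}
	once sync.Once
}

func newMiniServer() *miniServer { return &miniServer{done: make(chan struct{})} }

// Serve blocks until the server is stopped, then triggers cleanup, mirroring
// the patched Serve: err = s.grpcServer.Serve(lis); cleaner.Clean(); return err.
func (s *miniServer) Serve(c cleaner) error {
	<-s.done
	c.Clean()
	return nil
}

// Init mirrors the new success path: the stop runs in its own goroutine so
// the init response can still be sent before the listener shuts down.
func (s *miniServer) Init() {
	// ... initialize the cluster ...
	go s.GracefulStop()
}

// GracefulStop is also what the new Stop method delegates to, which makes the
// server usable as a stopper for the exit cleaner.
func (s *miniServer) GracefulStop() { s.once.Do(func() { close(s.done) }) }
```
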
@@ -4,12 +4,12 @@ import (
 "context"
 "errors"
 "net"
+"sync"
 "testing"
 "time"

 "github.com/edgelesssys/constellation/bootstrapper/initproto"
 "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes"
-"github.com/edgelesssys/constellation/bootstrapper/internal/nodelock"
 attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
 "github.com/edgelesssys/constellation/internal/file"
 "github.com/spf13/afero"
@@ -28,7 +28,7 @@ func TestNew(t *testing.T) {
 assert := assert.New(t)

 fh := file.NewHandler(afero.NewMemMapFs())
-server := New(nodelock.New(), &stubClusterInitializer{}, nil, fh, zap.NewNop())
+server := New(newFakeLock(), &stubClusterInitializer{}, nil, fh, zap.NewNop())
 assert.NotNil(server)
 assert.NotNil(server.logger)
 assert.NotNil(server.nodeLock)
@@ -40,11 +40,13 @@ func TestNew(t *testing.T) {

 func TestInit(t *testing.T) {
 someErr := errors.New("failed")
-lockedNodeLock := nodelock.New()
-require.True(t, lockedNodeLock.TryLockOnce())
+lockedLock := newFakeLock()
+aqcuiredLock, lockErr := lockedLock.TryLockOnce(nil, nil)
+require.True(t, aqcuiredLock)
+require.Nil(t, lockErr)

 testCases := map[string]struct {
-nodeLock *nodelock.Lock
+nodeLock *fakeLock
 initializer ClusterInitializer
 disk encryptedDisk
 fileHandler file.Handler
@@ -53,14 +55,14 @@ func TestInit(t *testing.T) {
 wantShutdown bool
 }{
 "successful init": {
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 initializer: &stubClusterInitializer{},
 disk: &stubDisk{},
 fileHandler: file.NewHandler(afero.NewMemMapFs()),
 req: &initproto.InitRequest{},
 },
 "node locked": {
-nodeLock: lockedNodeLock,
+nodeLock: lockedLock,
 initializer: &stubClusterInitializer{},
 disk: &stubDisk{},
 fileHandler: file.NewHandler(afero.NewMemMapFs()),
@@ -69,7 +71,7 @@ func TestInit(t *testing.T) {
 wantShutdown: true,
 },
 "disk open error": {
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 initializer: &stubClusterInitializer{},
 disk: &stubDisk{openErr: someErr},
 fileHandler: file.NewHandler(afero.NewMemMapFs()),
@@ -77,7 +79,7 @@ func TestInit(t *testing.T) {
 wantErr: true,
 },
 "disk uuid error": {
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 initializer: &stubClusterInitializer{},
 disk: &stubDisk{uuidErr: someErr},
 fileHandler: file.NewHandler(afero.NewMemMapFs()),
@@ -85,7 +87,7 @@ func TestInit(t *testing.T) {
 wantErr: true,
 },
 "disk update passphrase error": {
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 initializer: &stubClusterInitializer{},
 disk: &stubDisk{updatePassphraseErr: someErr},
 fileHandler: file.NewHandler(afero.NewMemMapFs()),
@@ -93,7 +95,7 @@ func TestInit(t *testing.T) {
 wantErr: true,
 },
 "write state file error": {
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 initializer: &stubClusterInitializer{},
 disk: &stubDisk{},
 fileHandler: file.NewHandler(afero.NewReadOnlyFs(afero.NewMemMapFs())),
@@ -101,7 +103,7 @@ func TestInit(t *testing.T) {
 wantErr: true,
 },
 "initialize cluster error": {
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 initializer: &stubClusterInitializer{initClusterErr: someErr},
 disk: &stubDisk{},
 fileHandler: file.NewHandler(afero.NewMemMapFs()),
@@ -142,7 +144,7 @@ func TestInit(t *testing.T) {

 assert.NoError(err)
 assert.NotNil(kubeconfig)
-assert.False(server.nodeLock.TryLockOnce()) // lock should be locked
+assert.False(server.nodeLock.TryLockOnce(nil, nil)) // lock should be locked
 })
 }
 }
@@ -237,3 +239,17 @@ func (s *stubServeStopper) Serve(net.Listener) error {
 func (s *stubServeStopper) GracefulStop() {
 s.shutdownCalled <- struct{}{}
 }
+
+type fakeLock struct {
+state *sync.Mutex
+}
+
+func newFakeLock() *fakeLock {
+return &fakeLock{
+state: &sync.Mutex{},
+}
+}
+
+func (l *fakeLock) TryLockOnce(_, _ []byte) (bool, error) {
+return l.state.TryLock(), nil
+}
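Both test files in this commit replace nodelock.Lock with the small fakeLock shown above: a sync.Mutex whose TryLock (available since Go 1.18) gives the same acquire-at-most-once semantics without touching a TPM. A runnable illustration of its behavior, with the fake copied from the diff:

```go
package main

import (
	"fmt"
	"sync"
)

// fakeLock is taken verbatim from the tests in this commit.
type fakeLock struct {
	state *sync.Mutex
}

func newFakeLock() *fakeLock {
	return &fakeLock{state: &sync.Mutex{}}
}

func (l *fakeLock) TryLockOnce(_, _ []byte) (bool, error) {
	return l.state.TryLock(), nil
}

func main() {
	lock := newFakeLock()
	first, _ := lock.TryLockOnce(nil, nil)
	second, _ := lock.TryLockOnce(nil, nil)
	fmt.Println(first, second) // prints: true false
}
```
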
@@ -10,7 +10,6 @@ import (
 "time"

 "github.com/edgelesssys/constellation/bootstrapper/internal/diskencryption"
-"github.com/edgelesssys/constellation/bootstrapper/internal/nodelock"
 "github.com/edgelesssys/constellation/bootstrapper/nodestate"
 "github.com/edgelesssys/constellation/bootstrapper/role"
 "github.com/edgelesssys/constellation/internal/cloud/metadata"
@@ -33,7 +32,7 @@ const (
 // JoinClient is a client for requesting the needed information and
 // joining an existing Kubernetes cluster.
 type JoinClient struct {
-nodeLock *nodelock.Lock
+nodeLock locker
 diskUUID string
 nodeName string
 role role.Role
@@ -57,7 +56,7 @@ type JoinClient struct {
 }

 // New creates a new JoinClient.
-func New(lock *nodelock.Lock, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, log *zap.Logger) *JoinClient {
+func New(lock locker, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, log *zap.Logger) *JoinClient {
 return &JoinClient{
 nodeLock: lock,
 disk: diskencryption.New(),
@@ -78,7 +77,7 @@ func New(lock *nodelock.Lock, dial grpcDialer, joiner ClusterJoiner, meta Metada
 // After receiving the needed information, the node will join the cluster.
 // Multiple calls of start on the same client won't start a second routine if there is
 // already a routine running.
-func (c *JoinClient) Start() {
+func (c *JoinClient) Start(cleaner cleaner) {
 c.mux.Lock()
 defer c.mux.Unlock()

@@ -123,6 +122,7 @@ func (c *JoinClient) Start() {
 err := c.tryJoinWithAvailableServices()
 if err == nil {
 c.log.Info("Joined successfully. Client is shut down.")
+go cleaner.Clean()
 return
 } else if isUnrecoverable(err) {
 c.log.Error("Unrecoverable error occurred", zap.Error(err))
@@ -220,7 +220,12 @@ func (c *JoinClient) startNodeAndJoin(ticket *joinproto.IssueJoinTicketResponse)
 }
 }()

-if ok := c.nodeLock.TryLockOnce(); !ok {
+nodeLockAcquired, err := c.nodeLock.TryLockOnce(ticket.OwnerId, ticket.ClusterId)
+if err != nil {
+c.log.Info("Acquiring node lock failed", zap.Error(err))
+return fmt.Errorf("acquiring node lock: %w", err)
+}
+if !nodeLockAcquired {
 // There is already a cluster initialization in progress on
 // this node, so there is no need to also join the cluster,
 // as the initializing node is automatically part of the cluster.
@@ -359,3 +364,13 @@ type encryptedDisk interface {
 // UpdatePassphrase switches the initial random passphrase of the encrypted disk to a permanent passphrase.
 UpdatePassphrase(passphrase string) error
 }
+
+type cleaner interface {
+Clean()
+}
+
+type locker interface {
+// TryLockOnce tries to lock the node. If the node is already locked, it
+// returns false. If the node is unlocked, it locks it and returns true.
+TryLockOnce(ownerID, clusterID []byte) (bool, error)
+}
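One subtlety in the join client change above: on success it calls cleaner.Clean() in a new goroutine rather than synchronously. Clean waits for every registered stopper, and one of those stoppers is the join client itself; if its Stop blocks until the join routine exits (which the concurrent start/stop test later in this diff suggests), a synchronous Clean from inside that routine would deadlock. A minimal, assumption-laden illustration of the pattern:

```go
package sketch

import "sync"

// worker stands in for the join client: Stop blocks until the routine exits.
type worker struct{ wg sync.WaitGroup }

// start launches the work routine; clean is the cleaner's Clean method.
func (w *worker) start(clean func()) {
	w.wg.Add(1)
	go func() {
		defer w.wg.Done()
		// ... join the cluster ...
		// Trigger cleanup of all services from a separate goroutine:
		// clean() eventually calls w.Stop(), which waits for this very
		// goroutine, so calling clean() synchronously here would deadlock.
		go clean()
	}()
}

// Stop waits for the work routine to finish.
func (w *worker) Stop() { w.wg.Wait() }
```
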
@@ -9,7 +9,6 @@ import (
 "testing"
 "time"

-"github.com/edgelesssys/constellation/bootstrapper/internal/nodelock"
 "github.com/edgelesssys/constellation/bootstrapper/role"
 "github.com/edgelesssys/constellation/internal/cloud/metadata"
 "github.com/edgelesssys/constellation/internal/constants"
@@ -35,8 +34,10 @@ func TestMain(m *testing.M) {

 func TestClient(t *testing.T) {
 someErr := errors.New("failed")
-lockedLock := nodelock.New()
-require.True(t, lockedLock.TryLockOnce())
+lockedLock := newFakeLock()
+aqcuiredLock, lockErr := lockedLock.TryLockOnce(nil, nil)
+require.True(t, aqcuiredLock)
+require.Nil(t, lockErr)
 workerSelf := metadata.InstanceMetadata{Role: role.Worker, Name: "node-1"}
 controlSelf := metadata.InstanceMetadata{Role: role.ControlPlane, Name: "node-5"}
 peers := []metadata.InstanceMetadata{
@@ -49,7 +50,7 @@ func TestClient(t *testing.T) {
 role role.Role
 clusterJoiner *stubClusterJoiner
 disk encryptedDisk
-nodeLock *nodelock.Lock
+nodeLock *fakeLock
 apiAnswers []any
 wantLock bool
 wantJoin bool
@@ -65,7 +66,7 @@ func TestClient(t *testing.T) {
 issueJoinTicketAnswer{},
 },
 clusterJoiner: &stubClusterJoiner{},
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 disk: &stubDisk{},
 wantJoin: true,
 wantLock: true,
@@ -81,7 +82,7 @@ func TestClient(t *testing.T) {
 issueJoinTicketAnswer{},
 },
 clusterJoiner: &stubClusterJoiner{},
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 disk: &stubDisk{},
 wantJoin: true,
 wantLock: true,
@@ -97,7 +98,7 @@ func TestClient(t *testing.T) {
 issueJoinTicketAnswer{},
 },
 clusterJoiner: &stubClusterJoiner{},
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 disk: &stubDisk{},
 wantJoin: true,
 wantLock: true,
@@ -113,7 +114,7 @@ func TestClient(t *testing.T) {
 issueJoinTicketAnswer{},
 },
 clusterJoiner: &stubClusterJoiner{},
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 disk: &stubDisk{},
 wantJoin: true,
 wantLock: true,
@@ -130,7 +131,7 @@ func TestClient(t *testing.T) {
 issueJoinTicketAnswer{},
 },
 clusterJoiner: &stubClusterJoiner{},
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 disk: &stubDisk{},
 wantJoin: true,
 wantLock: true,
@@ -147,7 +148,7 @@ func TestClient(t *testing.T) {
 issueJoinTicketAnswer{},
 },
 clusterJoiner: &stubClusterJoiner{},
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 disk: &stubDisk{},
 wantJoin: true,
 wantLock: true,
@@ -160,7 +161,7 @@ func TestClient(t *testing.T) {
 issueJoinTicketAnswer{},
 },
 clusterJoiner: &stubClusterJoiner{joinClusterErr: someErr},
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 disk: &stubDisk{},
 wantJoin: true,
 wantLock: true,
@@ -180,13 +181,13 @@ func TestClient(t *testing.T) {
 "on control plane: disk open fails": {
 role: role.ControlPlane,
 clusterJoiner: &stubClusterJoiner{},
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 disk: &stubDisk{openErr: someErr},
 },
 "on control plane: disk uuid fails": {
 role: role.ControlPlane,
 clusterJoiner: &stubClusterJoiner{},
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 disk: &stubDisk{uuidErr: someErr},
 },
 }
@@ -224,7 +225,7 @@ func TestClient(t *testing.T) {
 go joinServer.Serve(listener)
 defer joinServer.GracefulStop()

-client.Start()
+client.Start(stubCleaner{})

 for _, a := range tc.apiAnswers {
 switch a := a.(type) {
@@ -246,9 +247,9 @@ func TestClient(t *testing.T) {
 assert.False(tc.clusterJoiner.joinClusterCalled)
 }
 if tc.wantLock {
-assert.False(client.nodeLock.TryLockOnce()) // lock should be locked
+assert.False(client.nodeLock.TryLockOnce(nil, nil)) // lock should be locked
 } else {
-assert.True(client.nodeLock.TryLockOnce())
+assert.True(client.nodeLock.TryLockOnce(nil, nil))
 }
 })
 }
@@ -258,7 +259,7 @@ func TestClientConcurrentStartStop(t *testing.T) {
 netDialer := testdialer.NewBufconnDialer()
 dialer := dialer.New(nil, nil, netDialer)
 client := &JoinClient{
-nodeLock: nodelock.New(),
+nodeLock: newFakeLock(),
 timeout: 30 * time.Second,
 interval: 30 * time.Second,
 dialer: dialer,
@@ -274,7 +275,7 @@ func TestClientConcurrentStartStop(t *testing.T) {

 start := func() {
 defer wg.Done()
-client.Start()
+client.Start(stubCleaner{})
 }

 stop := func() {
@@ -415,3 +416,21 @@ func (d *stubDisk) UpdatePassphrase(string) error {
 d.updatePassphraseCalled = true
 return d.updatePassphraseErr
 }
+
+type stubCleaner struct{}
+
+func (c stubCleaner) Clean() {}
+
+type fakeLock struct {
+state *sync.Mutex
+}
+
+func newFakeLock() *fakeLock {
+return &fakeLock{
+state: &sync.Mutex{},
+}
+}
+
+func (l *fakeLock) TryLockOnce(_, _ []byte) (bool, error) {
+return l.state.TryLock(), nil
+}
@@ -177,7 +177,7 @@ func (k *KubeadmJoinYAML) SetNodeName(nodeName string) {
 k.JoinConfiguration.NodeRegistration.Name = nodeName
 }

-func (k *KubeadmJoinYAML) SetApiServerEndpoint(apiServerEndpoint string) {
+func (k *KubeadmJoinYAML) SetAPIServerEndpoint(apiServerEndpoint string) {
 k.JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint = apiServerEndpoint
 }

@@ -240,7 +240,7 @@ func (k *KubeadmInitYAML) SetCertSANs(certSANs []string) {
 }
 }

-func (k *KubeadmInitYAML) SetApiServerAdvertiseAddress(apiServerAdvertiseAddress string) {
+func (k *KubeadmInitYAML) SetAPIServerAdvertiseAddress(apiServerAdvertiseAddress string) {
 k.InitConfiguration.LocalAPIEndpoint.AdvertiseAddress = apiServerAdvertiseAddress
 }

@@ -24,7 +24,7 @@ func TestInitConfiguration(t *testing.T) {
 "CoreOS init config with all fields can be created": {
 config: func() KubeadmInitYAML {
 c := coreOSConfig.InitConfiguration(true)
-c.SetApiServerAdvertiseAddress("192.0.2.0")
+c.SetAPIServerAdvertiseAddress("192.0.2.0")
 c.SetNodeIP("192.0.2.0")
 c.SetNodeName("node")
 c.SetPodNetworkCIDR("10.244.0.0/16")
@@ -62,7 +62,7 @@ func TestJoinConfiguration(t *testing.T) {
 "CoreOS join config with all fields can be created": {
 config: func() KubeadmJoinYAML {
 c := coreOSConfig.JoinConfiguration(true)
-c.SetApiServerEndpoint("192.0.2.0:6443")
+c.SetAPIServerEndpoint("192.0.2.0:6443")
 c.SetNodeIP("192.0.2.0")
 c.SetNodeName("node")
 c.SetToken("token")
@@ -15,7 +15,7 @@ func NewImagePullSecret() k8s.Secret {
 []byte(fmt.Sprintf("%s:%s", secrets.PullSecretUser, secrets.PullSecretToken)),
 )

-pullSecretDockerCfgJson := fmt.Sprintf(`{"auths":{"ghcr.io":{"auth":"%s"}}}`, base64EncodedSecret)
+pullSecretDockerCfgJSON := fmt.Sprintf(`{"auths":{"ghcr.io":{"auth":"%s"}}}`, base64EncodedSecret)

 return k8s.Secret{
 TypeMeta: meta.TypeMeta{
@@ -26,7 +26,7 @@ func NewImagePullSecret() k8s.Secret {
 Name: secrets.PullSecretName,
 Namespace: "kube-system",
 },
-StringData: map[string]string{".dockerconfigjson": pullSecretDockerCfgJson},
+StringData: map[string]string{".dockerconfigjson": pullSecretDockerCfgJSON},
 Type: "kubernetes.io/dockerconfigjson",
 }
 }
@@ -5,7 +5,7 @@ import (
 "k8s.io/apimachinery/pkg/runtime"
 )

-// ConfigMaps represent a list of k8s Secret.
+// Secrets represent a list of k8s Secret.
 type Secrets []*k8s.Secret

 // Marshal marshals secrets into multiple YAML documents.
@@ -13,7 +13,6 @@ import (

 "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources"
 "go.uber.org/zap"
-kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 )

 const (
@@ -23,10 +22,7 @@ const (
 kubeletStartTimeout = 10 * time.Minute
 )

-var (
-kubernetesKeyRegexp = regexp.MustCompile("[a-f0-9]{64}")
-providerIDRegex = regexp.MustCompile(`^azure:///subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/Microsoft.Compute/virtualMachineScaleSets/([^/]+)/virtualMachines/([^/]+)$`)
-)
+var providerIDRegex = regexp.MustCompile(`^azure:///subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/Microsoft.Compute/virtualMachineScaleSets/([^/]+)/virtualMachines/([^/]+)$`)

 // Client provides the functionality of `kubectl apply`.
 type Client interface {
@@ -35,28 +31,12 @@ type Client interface {
 // TODO: add tolerations
 }

-type ClusterUtil interface {
-InstallComponents(ctx context.Context, version string) error
-InitCluster(initConfig []byte) error
-JoinCluster(joinConfig []byte) error
-SetupPodNetwork(kubectl Client, podNetworkConfiguration resources.Marshaler) error
-SetupAccessManager(kubectl Client, accessManagerConfiguration resources.Marshaler) error
-SetupAutoscaling(kubectl Client, clusterAutoscalerConfiguration resources.Marshaler, secrets resources.Marshaler) error
-SetupCloudControllerManager(kubectl Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error
-SetupCloudNodeManager(kubectl Client, cloudNodeManagerConfiguration resources.Marshaler) error
-SetupKMS(kubectl Client, kmsConfiguration resources.Marshaler) error
-StartKubelet() error
-RestartKubelet() error
-GetControlPlaneJoinCertificateKey() (string, error)
-CreateJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error)
-}
-
 // KubernetesUtil provides low level management of the kubernetes cluster.
 type KubernetesUtil struct {
 inst installer
 }

-// NewKubernetesUtils creates a new KubernetesUtil.
+// NewKubernetesUtil creates a new KubernetesUtil.
 func NewKubernetesUtil() *KubernetesUtil {
 return &KubernetesUtil{
 inst: newOSInstaller(),
@@ -91,7 +71,6 @@ func (k *KubernetesUtil) InitCluster(ctx context.Context, initConfig []byte, log
 if err != nil {
 return fmt.Errorf("creating init config file %v: %w", initConfigFile.Name(), err)
 }
-// defer os.Remove(initConfigFile.Name())

 if _, err := initConfigFile.Write(initConfig); err != nil {
 return fmt.Errorf("writing kubeadm init yaml config %v: %w", initConfigFile.Name(), err)
@@ -296,7 +275,6 @@ func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log
 if err != nil {
 return fmt.Errorf("creating join config file %v: %w", joinConfigFile.Name(), err)
 }
-// defer os.Remove(joinConfigFile.Name())

 if _, err := joinConfigFile.Write(joinConfig); err != nil {
 return fmt.Errorf("writing kubeadm init yaml config %v: %w", joinConfigFile.Name(), err)
@@ -333,40 +311,3 @@ func (k *KubernetesUtil) RestartKubelet() error {
 defer cancel()
 return restartSystemdUnit(ctx, "kubelet.service")
 }
-
-// GetControlPlaneJoinCertificateKey return the key which can be used in combination with the joinArgs
-// to join the Cluster as control-plane.
-func (k *KubernetesUtil) GetControlPlaneJoinCertificateKey(ctx context.Context) (string, error) {
-// Key will be valid for 1h (no option to reduce the duration).
-// https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-upload-certs
-output, err := exec.CommandContext(ctx, kubeadmPath, "init", "phase", "upload-certs", "--upload-certs").Output()
-if err != nil {
-var exitErr *exec.ExitError
-if errors.As(err, &exitErr) {
-return "", fmt.Errorf("kubeadm upload-certs failed (code %v) with: %s (full err: %s)", exitErr.ExitCode(), exitErr.Stderr, err)
-}
-return "", fmt.Errorf("kubeadm upload-certs: %w", err)
-}
-// Example output:
-/*
-[upload-certs] Storing the certificates in ConfigMap "kubeadm-certs" in the "kube-system" Namespace
-[upload-certs] Using certificate key:
-9555b74008f24687eb964bd90a164ecb5760a89481d9c55a77c129b7db438168
-*/
-key := kubernetesKeyRegexp.FindString(string(output))
-if key == "" {
-return "", fmt.Errorf("failed to parse kubeadm output: %s", string(output))
-}
-return key, nil
-}
-
-// CreateJoinToken creates a new bootstrap (join) token.
-func (k *KubernetesUtil) CreateJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
-output, err := exec.CommandContext(ctx, kubeadmPath, "token", "create", "--ttl", ttl.String(), "--print-join-command").Output()
-if err != nil {
-return nil, fmt.Errorf("kubeadm token create: %w", err)
-}
-// `kubeadm token create [...] --print-join-command` outputs the following format:
-// kubeadm join [API_SERVER_ENDPOINT] --token [TOKEN] --discovery-token-ca-cert-hash [DISCOVERY_TOKEN_CA_CERT_HASH]
-return ParseJoinCommand(string(output))
-}
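GetControlPlaneJoinCertificateKey and CreateJoinToken are removed from the bootstrapper's Kubernetes utilities here (this diff only shows the removal; where the functionality lives afterwards is not part of this excerpt). For reference, the runnable snippet below reproduces just the parsing step the removed helper performed on kubeadm's output, using the regex and the example output quoted in the deleted code.

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern the removed code used to pull the certificate key out of
// `kubeadm init phase upload-certs --upload-certs` output.
var kubernetesKeyRegexp = regexp.MustCompile("[a-f0-9]{64}")

func main() {
	// Example output quoted in the removed helper.
	output := `[upload-certs] Storing the certificates in ConfigMap "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
9555b74008f24687eb964bd90a164ecb5760a89481d9c55a77c129b7db438168`

	key := kubernetesKeyRegexp.FindString(output)
	if key == "" {
		fmt.Println("failed to parse kubeadm output")
		return
	}
	fmt.Println(key) // the control-plane join certificate key
}
```
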
@@ -2,12 +2,10 @@ package kubernetes

 import (
 "context"
-"time"

 "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi"
 "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources"
 "go.uber.org/zap"
-kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 )

 type clusterUtil interface {
@@ -25,7 +23,5 @@ type clusterUtil interface {
 SetupGCPGuestAgent(kubectl k8sapi.Client, gcpGuestAgentConfiguration resources.Marshaler) error
 StartKubelet() error
 RestartKubelet() error
-GetControlPlaneJoinCertificateKey(ctx context.Context) (string, error)
-CreateJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error)
 FixCilium(nodeNameK8s string)
 }
@@ -7,7 +7,6 @@ import (
 "fmt"
 "os/exec"
 "strings"
-"time"

 "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi"
 "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources"
@@ -201,7 +200,7 @@ func (k *KubeWrapper) InitCluster(
 }
 }

-go k.clusterUtil.FixCilium(nodeName)
+k.clusterUtil.FixCilium(nodeName)

 return k.GetKubeconfig()
 }
@@ -236,7 +235,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo
 // Step 2: configure kubeadm join config

 joinConfig := k.configProvider.JoinConfiguration(k.cloudControllerManager.Supported())
-joinConfig.SetApiServerEndpoint(args.APIServerEndpoint)
+joinConfig.SetAPIServerEndpoint(args.APIServerEndpoint)
 joinConfig.SetToken(args.Token)
 joinConfig.AppendDiscoveryTokenCaCertHash(args.CACertHashes[0])
 joinConfig.SetNodeIP(nodeInternalIP)
@@ -253,7 +252,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo
 return fmt.Errorf("joining cluster: %v; %w ", string(joinConfigYAML), err)
 }

-go k.clusterUtil.FixCilium(nodeName)
+k.clusterUtil.FixCilium(nodeName)

 return nil
 }
@@ -263,16 +262,6 @@ func (k *KubeWrapper) GetKubeconfig() ([]byte, error) {
 return k.kubeconfigReader.ReadKubeconfig()
 }

-// GetKubeadmCertificateKey return the key needed to join the Cluster as Control-Plane (has to be executed on a control-plane; errors otherwise).
-func (k *KubeWrapper) GetKubeadmCertificateKey(ctx context.Context) (string, error) {
-return k.clusterUtil.GetControlPlaneJoinCertificateKey(ctx)
-}
-
-// GetJoinToken returns a bootstrap (join) token.
-func (k *KubeWrapper) GetJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
-return k.clusterUtil.CreateJoinToken(ctx, ttl)
-}
-
 func (k *KubeWrapper) setupJoinService(csp string, measurementsJSON []byte, id attestationtypes.ID) error {
 idJSON, err := json.Marshal(id)
 if err != nil {
@@ -5,7 +5,6 @@ import (
 "errors"
 "regexp"
 "testing"
-"time"

 "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi"
 "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources"
@@ -26,7 +25,7 @@ func TestMain(m *testing.M) {

 func TestInitCluster(t *testing.T) {
 someErr := errors.New("failed")
-serviceAccountUri := "some-service-account-uri"
+serviceAccountURI := "some-service-account-uri"
 masterSecret := []byte("some-master-secret")
 autoscalingNodeGroups := []string{"0,10,autoscaling_group_0"}

@@ -270,7 +269,7 @@ func TestInitCluster(t *testing.T) {
 kubeconfigReader: tc.kubeconfigReader,
 getIPAddr: func() (string, error) { return privateIP, nil },
 }
-_, err := kube.InitCluster(context.Background(), autoscalingNodeGroups, serviceAccountUri, k8sVersion, attestationtypes.ID{}, KMSConfig{MasterSecret: masterSecret}, nil, zaptest.NewLogger(t))
+_, err := kube.InitCluster(context.Background(), autoscalingNodeGroups, serviceAccountURI, k8sVersion, attestationtypes.ID{}, KMSConfig{MasterSecret: masterSecret}, nil, zaptest.NewLogger(t))

 if tc.wantErr {
 assert.Error(err)
@@ -490,8 +489,6 @@ type stubClusterUtil struct {
 joinClusterErr error
 startKubeletErr error
 restartKubeletErr error
-createJoinTokenResponse *kubeadm.BootstrapTokenDiscovery
-createJoinTokenErr error

 initConfigs [][]byte
 joinConfigs [][]byte
@@ -555,14 +552,6 @@ func (s *stubClusterUtil) RestartKubelet() error {
 return s.restartKubeletErr
 }

-func (s *stubClusterUtil) GetControlPlaneJoinCertificateKey(context.Context) (string, error) {
-return "", nil
-}
-
-func (s *stubClusterUtil) CreateJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
-return s.createJoinTokenResponse, s.createJoinTokenErr
-}
-
 func (s *stubClusterUtil) FixCilium(nodeName string) {
 }

@ -1,6 +1,10 @@
|
|||||||
package nodelock
|
package nodelock
|
||||||
|
|
||||||
import "sync"
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/edgelesssys/constellation/internal/attestation/vtpm"
|
||||||
|
)
|
||||||
|
|
||||||
// Lock locks the node once there the join or the init is at a point
|
// Lock locks the node once there the join or the init is at a point
|
||||||
// where there is no turning back and the other operation does not need
|
// where there is no turning back and the other operation does not need
|
||||||
@ -10,16 +14,39 @@ import "sync"
|
|||||||
// There is no way to unlock, so the state changes only once from unlock to
|
// There is no way to unlock, so the state changes only once from unlock to
|
||||||
// locked.
|
// locked.
|
||||||
type Lock struct {
|
type Lock struct {
|
||||||
mux *sync.Mutex
|
tpm vtpm.TPMOpenFunc
|
||||||
|
locked bool
|
||||||
|
state *sync.Mutex
|
||||||
|
mux *sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new NodeLock, which is unlocked.
|
// New creates a new NodeLock, which is unlocked.
|
||||||
func New() *Lock {
|
func New(tpm vtpm.TPMOpenFunc) *Lock {
|
||||||
return &Lock{mux: &sync.Mutex{}}
|
return &Lock{
|
||||||
|
tpm: tpm,
|
||||||
|
state: &sync.Mutex{},
|
||||||
|
mux: &sync.RWMutex{},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TryLockOnce tries to lock the node. If the node is already locked, it
|
// TryLockOnce tries to lock the node. If the node is already locked, it
|
||||||
// returns false. If the node is unlocked, it locks it and returns true.
|
// returns false. If the node is unlocked, it locks it and returns true.
|
||||||
func (n *Lock) TryLockOnce() bool {
|
func (l *Lock) TryLockOnce(ownerID, clusterID []byte) (bool, error) {
|
||||||
return n.mux.TryLock()
|
success := l.state.TryLock()
|
||||||
|
if success {
|
||||||
|
l.mux.Lock()
|
||||||
|
defer l.mux.Unlock()
|
||||||
|
l.locked = true
|
||||||
|
if err := vtpm.MarkNodeAsBootstrapped(l.tpm, ownerID, clusterID); err != nil {
|
||||||
|
return success, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return success, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Locked returns true if the node is locked.
|
||||||
|
func (l *Lock) Locked() bool {
|
||||||
|
l.mux.RLock()
|
||||||
|
defer l.mux.RUnlock()
|
||||||
|
return l.locked
|
||||||
}
|
}
|
||||||
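Note on the nodelock change above: the lock now persists the bootstrap state to the vTPM instead of only holding an in-memory mutex, and TryLockOnce gains an error return for that write. The following is a minimal usage sketch, not part of the commit; the import paths and the vtpm.OpenVTPM opener are assumptions made for illustration, only the New, TryLockOnce, and Locked signatures come from the diff itself.

package main

import (
	"log"

	"github.com/edgelesssys/constellation/bootstrapper/internal/nodelock" // assumed package path
	"github.com/edgelesssys/constellation/internal/attestation/vtpm"      // assumed to provide a TPMOpenFunc named OpenVTPM
)

func main() {
	// New now takes a TPM opener so the lock can mark the node as bootstrapped.
	lock := nodelock.New(vtpm.OpenVTPM)

	ownerID := []byte("owner-id")
	clusterID := []byte("cluster-id")

	// TryLockOnce now returns (bool, error): false means init or join already
	// claimed the node, a non-nil error means writing the bootstrap marker failed.
	locked, err := lock.TryLockOnce(ownerID, clusterID)
	if err != nil {
		log.Fatalf("marking node as bootstrapped: %v", err)
	}
	if !locked {
		log.Println("node is already locked by a concurrent init/join")
	}

	// Locked is the new read-only accessor guarded by the RWMutex.
	log.Println("locked:", lock.Locked())
}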
@ -15,24 +15,24 @@ func TestMain(m *testing.M) {
 func TestMarshal(t *testing.T) {
 testCases := map[string]struct {
 role Role
-wantJson string
+wantJSON string
 wantErr bool
 }{
 "controlePlane role": {
 role: ControlPlane,
-wantJson: `"ControlPlane"`,
+wantJSON: `"ControlPlane"`,
 },
 "node role": {
 role: Worker,
-wantJson: `"Worker"`,
+wantJSON: `"Worker"`,
 },
 "admin role": {
 role: Admin,
-wantJson: `"Admin"`,
+wantJSON: `"Admin"`,
 },
 "unknown role": {
 role: Unknown,
-wantJson: `"Unknown"`,
+wantJSON: `"Unknown"`,
 },
 }

@ -48,7 +48,7 @@ func TestMarshal(t *testing.T) {
 }

 require.NoError(err)
-assert.Equal(tc.wantJson, string(jsonRole))
+assert.Equal(tc.wantJSON, string(jsonRole))
 })
 }
 }
@ -154,6 +154,10 @@ func (c *Client) GetState() (state.ConstellationState, error) {
 return state.ConstellationState{}, errors.New("client has no uid")
 }
 stat.UID = c.uid
+if len(c.loadBalancerPubIP) == 0 {
+return state.ConstellationState{}, errors.New("client has no load balancer public IP")
+}
+stat.BootstrapperHost = c.loadBalancerPubIP
 if len(c.location) == 0 {
 return state.ConstellationState{}, errors.New("client has no location")
 }
@ -213,6 +217,10 @@ func (c *Client) SetState(stat state.ConstellationState) error {
 return errors.New("state has no uuid")
 }
 c.uid = stat.UID
+if len(stat.BootstrapperHost) == 0 {
+return errors.New("state has no bootstrapper host")
+}
+c.loadBalancerPubIP = stat.BootstrapperHost
 if len(stat.AzureLocation) == 0 {
 return errors.New("state has no location")
 }
@ -37,6 +37,7 @@ func TestSetGetState(t *testing.T) {
 },
 Name: "name",
 UID: "uid",
+BootstrapperHost: "bootstrapper-host",
 AzureResourceGroup: "resource-group",
 AzureLocation: "location",
 AzureSubscription: "subscription",
@ -58,6 +59,7 @@ func TestSetGetState(t *testing.T) {
 },
 Name: "name",
 UID: "uid",
+BootstrapperHost: "bootstrapper-host",
 AzureResourceGroup: "resource-group",
 AzureLocation: "location",
 AzureSubscription: "subscription",
@ -80,6 +82,7 @@ func TestSetGetState(t *testing.T) {
 },
 Name: "name",
 UID: "uid",
+BootstrapperHost: "bootstrapper-host",
 AzureResourceGroup: "resource-group",
 AzureLocation: "location",
 AzureSubscription: "subscription",
@ -107,6 +110,7 @@ func TestSetGetState(t *testing.T) {
 },
 },
 UID: "uid",
+BootstrapperHost: "bootstrapper-host",
 AzureResourceGroup: "resource-group",
 AzureLocation: "location",
 AzureSubscription: "subscription",
@ -134,6 +138,35 @@ func TestSetGetState(t *testing.T) {
 },
 },
 Name: "name",
+BootstrapperHost: "bootstrapper-host",
+AzureResourceGroup: "resource-group",
+AzureLocation: "location",
+AzureSubscription: "subscription",
+AzureTenant: "tenant",
+AzureSubnet: "azure-subnet",
+AzureNetworkSecurityGroup: "network-security-group",
+AzureWorkersScaleSet: "worker-scale-set",
+AzureControlPlanesScaleSet: "controlplane-scale-set",
+},
+wantErr: true,
+},
+"missing bootstrapper host": {
+state: state.ConstellationState{
+CloudProvider: cloudprovider.Azure.String(),
+AzureWorkers: cloudtypes.Instances{
+"0": {
+PublicIP: "ip1",
+PrivateIP: "ip2",
+},
+},
+AzureControlPlane: cloudtypes.Instances{
+"0": {
+PublicIP: "ip3",
+PrivateIP: "ip4",
+},
+},
+Name: "name",
+UID: "uid",
 AzureResourceGroup: "resource-group",
 AzureLocation: "location",
 AzureSubscription: "subscription",
@ -162,6 +195,7 @@ func TestSetGetState(t *testing.T) {
 },
 Name: "name",
 UID: "uid",
+BootstrapperHost: "bootstrapper-host",
 AzureLocation: "location",
 AzureSubscription: "subscription",
 AzureTenant: "tenant",
@ -189,6 +223,7 @@ func TestSetGetState(t *testing.T) {
 },
 Name: "name",
 UID: "uid",
+BootstrapperHost: "bootstrapper-host",
 AzureResourceGroup: "resource-group",
 AzureSubscription: "subscription",
 AzureTenant: "tenant",
@ -216,6 +251,7 @@ func TestSetGetState(t *testing.T) {
 },
 Name: "name",
 UID: "uid",
+BootstrapperHost: "bootstrapper-host",
 AzureResourceGroup: "resource-group",
 AzureTenant: "tenant",
 AzureLocation: "location",
@ -243,6 +279,7 @@ func TestSetGetState(t *testing.T) {
 },
 Name: "name",
 UID: "uid",
+BootstrapperHost: "bootstrapper-host",
 AzureResourceGroup: "resource-group",
 AzureSubscription: "subscription",
 AzureLocation: "location",
@ -270,6 +307,7 @@ func TestSetGetState(t *testing.T) {
 },
 Name: "name",
 UID: "uid",
+BootstrapperHost: "bootstrapper-host",
 AzureResourceGroup: "resource-group",
 AzureLocation: "location",
 AzureSubscription: "subscription",
@ -297,6 +335,7 @@ func TestSetGetState(t *testing.T) {
 },
 Name: "name",
 UID: "uid",
+BootstrapperHost: "bootstrapper-host",
 AzureResourceGroup: "resource-group",
 AzureLocation: "location",
 AzureSubscription: "subscription",
@ -324,6 +363,7 @@ func TestSetGetState(t *testing.T) {
 },
 Name: "name",
 UID: "uid",
+BootstrapperHost: "bootstrapper-host",
 AzureResourceGroup: "resource-group",
 AzureLocation: "location",
 AzureSubscription: "subscription",
@ -351,6 +391,7 @@ func TestSetGetState(t *testing.T) {
 },
 Name: "name",
 UID: "uid",
+BootstrapperHost: "bootstrapper-host",
 AzureResourceGroup: "resource-group",
 AzureLocation: "location",
 AzureSubscription: "subscription",
@ -400,6 +441,7 @@ func TestSetGetState(t *testing.T) {
 controlPlanes: tc.state.AzureControlPlane,
 name: tc.state.Name,
 uid: tc.state.UID,
+loadBalancerPubIP: tc.state.BootstrapperHost,
 resourceGroup: tc.state.AzureResourceGroup,
 location: tc.state.AzureLocation,
 subscriptionID: tc.state.AzureSubscription,
@ -61,14 +61,6 @@ func (c *Client) CreateInstances(ctx context.Context, input CreateInstancesInput
 }
 c.controlPlanes = instances
-
-// Set the load balancer public IP in the first control plane
-coord, ok := c.controlPlanes["0"]
-if !ok {
-return errors.New("control plane 0 not found")
-}
-coord.PublicIP = c.loadBalancerPubIP
-c.controlPlanes["0"] = coord

 return nil
 }

@ -228,7 +228,7 @@ func TestCreateInstances(t *testing.T) {
 assert.NotEmpty(client.workers["0"].PrivateIP)
 assert.NotEmpty(client.workers["0"].PublicIP)
 assert.NotEmpty(client.controlPlanes["0"].PrivateIP)
-assert.Equal("lbip", client.controlPlanes["0"].PublicIP)
+assert.NotEmpty(client.controlPlanes["0"].PublicIP)
 }
 })
 }
@ -9,6 +9,7 @@ import (
 "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
 )

+// VMInstance describes a single instance.
 // TODO: deprecate as soon as scale sets are available.
 type VMInstance struct {
 Name string
@ -20,6 +21,7 @@ type VMInstance struct {
 Image string
 }

+// Azure makes a new virtual machine template with default values.
 // TODO: deprecate as soon as scale sets are available.
 func (i VMInstance) Azure() armcompute.VirtualMachine {
 return armcompute.VirtualMachine{
@ -123,7 +123,7 @@ func (c *Creator) createGCP(ctx context.Context, cl gcpclient, config *config.Co
 createInput := gcpcl.CreateInstancesInput{
 CountControlPlanes: controlPlaneCount,
 CountWorkers: workerCount,
-ImageId: config.Provider.GCP.Image,
+ImageID: config.Provider.GCP.Image,
 InstanceType: insType,
 StateDiskSizeGB: config.StateDiskSizeGB,
 KubeEnv: gcp.KubeEnv,
@ -11,7 +11,7 @@ import (
 "github.com/edgelesssys/constellation/internal/state"
 )

-// ServieAccountCreator creates service accounts.
+// ServiceAccountCreator creates service accounts.
 type ServiceAccountCreator struct {
 newGCPClient func(ctx context.Context) (gcpclient, error)
 newAzureClient func(subscriptionID, tenantID string) (azureclient, error)
@ -102,7 +102,7 @@ func initialize(cmd *cobra.Command, dialer grpcDialer, serviceAccCreator service
 return err
 }

-controlPlanes, workers, err := getScalingGroupsFromConfig(stat, config)
+controlPlanes, workers, err := getScalingGroupsFromState(stat, config)
 if err != nil {
 return err
 }
@ -123,7 +123,7 @@ func initialize(cmd *cobra.Command, dialer grpcDialer, serviceAccCreator service
 KubernetesVersion: "1.23.6",
 SshUserKeys: ssh.ToProtoSlice(sshUsers),
 }
-resp, err := initCall(cmd.Context(), dialer, controlPlanes.PublicIPs()[0], req)
+resp, err := initCall(cmd.Context(), dialer, stat.BootstrapperHost, req)
 if err != nil {
 return err
 }
@ -269,7 +269,7 @@ func readOrGenerateMasterSecret(writer io.Writer, fileHandler file.Handler, file
 return masterSecret, nil
 }

-func getScalingGroupsFromConfig(stat state.ConstellationState, config *config.Config) (controlPlanes, workers cloudtypes.ScalingGroup, err error) {
+func getScalingGroupsFromState(stat state.ConstellationState, config *config.Config) (controlPlanes, workers cloudtypes.ScalingGroup, err error) {
 switch {
 case len(stat.GCPControlPlanes) != 0:
 return getGCPInstances(stat, config)
@ -329,7 +329,7 @@ func getAzureInstances(stat state.ConstellationState, config *config.Config) (co
 return
 }

-func getQEMUInstances(stat state.ConstellationState, config *config.Config) (controlPlanes, workers cloudtypes.ScalingGroup, err error) {
+func getQEMUInstances(stat state.ConstellationState, _ *config.Config) (controlPlanes, workers cloudtypes.ScalingGroup, err error) {
 controlPlanesMap := stat.QEMUControlPlane
 if len(controlPlanesMap) == 0 {
 return cloudtypes.ScalingGroup{}, cloudtypes.ScalingGroup{}, errors.New("no controlPlanes available, can't create Constellation without any instance")
@ -39,6 +39,7 @@ func TestInitArgumentValidation(t *testing.T) {
 func TestInitialize(t *testing.T) {
 testGcpState := state.ConstellationState{
 CloudProvider: "GCP",
+BootstrapperHost: "192.0.2.1",
 GCPWorkers: cloudtypes.Instances{
 "id-0": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"},
 "id-1": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"},
@ -49,6 +50,7 @@ func TestInitialize(t *testing.T) {
 }
 testAzureState := state.ConstellationState{
 CloudProvider: "Azure",
+BootstrapperHost: "192.0.2.1",
 AzureWorkers: cloudtypes.Instances{
 "id-0": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"},
 "id-1": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"},
@ -60,6 +62,7 @@ func TestInitialize(t *testing.T) {
 }
 testQemuState := state.ConstellationState{
 CloudProvider: "QEMU",
+BootstrapperHost: "192.0.2.1",
 QEMUWorkers: cloudtypes.Instances{
 "id-0": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"},
 "id-1": {PrivateIP: "192.0.2.1", PublicIP: "192.0.2.1"},
@ -183,7 +186,7 @@ func TestWriteOutput(t *testing.T) {
 ownerID := base64.StdEncoding.EncodeToString(resp.OwnerId)
 clusterID := base64.StdEncoding.EncodeToString(resp.ClusterId)

-expectedIdFile := clusterIDsFile{
+expectedIDFile := clusterIDsFile{
 ClusterID: clusterID,
 OwnerID: ownerID,
 Endpoint: net.JoinHostPort("ip", strconv.Itoa(constants.VerifyServiceNodePortGRPC)),
@ -206,10 +209,10 @@ func TestWriteOutput(t *testing.T) {

 idsFile, err := afs.ReadFile(constants.ClusterIDsFileName)
 assert.NoError(err)
-var testIdFile clusterIDsFile
+var testIDFile clusterIDsFile
-err = json.Unmarshal(idsFile, &testIdFile)
+err = json.Unmarshal(idsFile, &testIDFile)
 assert.NoError(err)
-assert.Equal(expectedIdFile, testIdFile)
+assert.Equal(expectedIDFile, testIDFile)
 }

 func TestInitCompletion(t *testing.T) {
@ -7,7 +7,7 @@ import (
 "github.com/spf13/cobra"
 )

-// NewVerifyCmd returns a new cobra.Command for the verify command.
+// NewVersionCmd returns a new cobra.Command for the verify command.
 func NewVersionCmd() *cobra.Command {
 cmd := &cobra.Command{
 Use: "version",
@ -227,6 +227,11 @@ func (c *Client) GetState() (state.ConstellationState, error) {
 return state.ConstellationState{}, errors.New("client has no controlPlanes")
 }
 stat.GCPControlPlanes = c.controlPlanes
+publicIPs := c.controlPlanes.PublicIPs()
+if len(publicIPs) == 0 {
+return state.ConstellationState{}, errors.New("client has no bootstrapper endpoint")
+}
+stat.BootstrapperHost = publicIPs[0]

 if c.workerInstanceGroup == "" {
 return state.ConstellationState{}, errors.New("client has no workerInstanceGroup")
@ -46,6 +46,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -73,6 +74,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -100,6 +102,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -132,6 +135,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -164,6 +168,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -196,6 +201,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -228,6 +234,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -260,6 +267,7 @@ func TestSetGetState(t *testing.T) {
 GCPZone: "zone-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -291,6 +299,7 @@ func TestSetGetState(t *testing.T) {
 GCPProject: "proj-id",
 GCPZone: "zone-id",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPRegion: "region-id",
 GCPNetwork: "net-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -322,6 +331,7 @@ func TestSetGetState(t *testing.T) {
 GCPProject: "proj-id",
 GCPZone: "zone-id",
 Name: "name",
+BootstrapperHost: "ip3",
 GCPRegion: "region-id",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
@ -356,6 +366,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPWorkerInstanceTemplate: "temp-id",
@ -388,6 +399,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPFirewalls: []string{"fw-1", "fw-2"},
 GCPWorkerInstanceTemplate: "temp-id",
 GCPControlPlaneInstanceTemplate: "temp-id",
@ -419,6 +431,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
 GCPWorkerInstanceTemplate: "temp-id",
@ -451,6 +464,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
 GCPWorkerInstanceTemplate: "temp-id",
@ -483,6 +497,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
 GCPWorkerInstanceTemplate: "temp-id",
@ -515,6 +530,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -547,6 +563,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -579,6 +596,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -611,6 +629,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -643,6 +662,7 @@ func TestSetGetState(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -745,6 +765,7 @@ func TestSetStateCloudProvider(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -776,6 +797,7 @@ func TestSetStateCloudProvider(t *testing.T) {
 GCPRegion: "region-id",
 Name: "name",
 UID: "uid",
+BootstrapperHost: "ip3",
 GCPNetwork: "net-id",
 GCPSubnetwork: "subnet-id",
 GCPFirewalls: []string{"fw-1", "fw-2"},
@ -30,7 +30,7 @@ func (c *Client) CreateInstances(ctx context.Context, input CreateInstancesInput
 Network: c.network,
 SecondarySubnetworkRangeName: c.secondarySubnetworkRange,
 Subnetwork: c.subnetwork,
-ImageId: input.ImageId,
+ImageID: input.ImageID,
 InstanceType: input.InstanceType,
 StateDiskSizeGB: int64(input.StateDiskSizeGB),
 Role: role.Worker.String(),
@ -52,7 +52,7 @@ func (c *Client) CreateInstances(ctx context.Context, input CreateInstancesInput
 Network: c.network,
 Subnetwork: c.subnetwork,
 SecondarySubnetworkRangeName: c.secondarySubnetworkRange,
-ImageId: input.ImageId,
+ImageID: input.ImageID,
 InstanceType: input.InstanceType,
 StateDiskSizeGB: int64(input.StateDiskSizeGB),
 Role: role.ControlPlane.String(),
@ -197,13 +197,13 @@ func (c *Client) deleteInstanceGroupManager(ctx context.Context, instanceGroupMa
 return c.instanceGroupManagersAPI.Delete(ctx, req)
 }

-func (c *Client) waitForInstanceGroupScaling(ctx context.Context, groupId string) error {
+func (c *Client) waitForInstanceGroupScaling(ctx context.Context, groupID string) error {
 for {
 if err := ctx.Err(); err != nil {
 return err
 }
 listReq := &computepb.ListManagedInstancesInstanceGroupManagersRequest{
-InstanceGroupManager: groupId,
+InstanceGroupManager: groupID,
 Project: c.project,
 Zone: c.zone,
 }
@ -228,9 +228,9 @@ func (c *Client) waitForInstanceGroupScaling(ctx context.Context, groupId string
 }

 // getInstanceIPs requests the IPs of the client's instances.
-func (c *Client) getInstanceIPs(ctx context.Context, groupId string, list cloudtypes.Instances) error {
+func (c *Client) getInstanceIPs(ctx context.Context, groupID string, list cloudtypes.Instances) error {
 req := &computepb.ListInstancesRequest{
-Filter: proto.String("name=" + groupId + "*"),
+Filter: proto.String("name=" + groupID + "*"),
 Project: c.project,
 Zone: c.zone,
 }
@ -292,7 +292,7 @@ func (i *instanceGroupManagerInput) InsertInstanceGroupManagerRequest() computep
 type CreateInstancesInput struct {
 CountWorkers int
 CountControlPlanes int
-ImageId string
+ImageID string
 InstanceType string
 StateDiskSizeGB int
 KubeEnv string
@ -303,7 +303,7 @@ type insertInstanceTemplateInput struct {
 Network string
 Subnetwork string
 SecondarySubnetworkRangeName string
-ImageId string
+ImageID string
 InstanceType string
 StateDiskSizeGB int64
 Role string
@ -328,7 +328,7 @@ func (i insertInstanceTemplateInput) insertInstanceTemplateRequest() *computepb.
 {
 InitializeParams: &computepb.AttachedDiskInitializeParams{
 DiskSizeGb: proto.Int64(10),
-SourceImage: proto.String(i.ImageId),
+SourceImage: proto.String(i.ImageID),
 },
 AutoDelete: proto.Bool(true),
 Boot: proto.Bool(true),
@ -43,7 +43,7 @@ func TestCreateInstances(t *testing.T) {
 testInput := CreateInstancesInput{
 CountControlPlanes: 3,
 CountWorkers: 4,
-ImageId: "img",
+ImageID: "img",
 InstanceType: "n2d-standard-2",
 KubeEnv: "kube-env",
 }
@ -6,7 +6,6 @@ import (
 "fmt"

 "github.com/edgelesssys/constellation/internal/cloud/cloudtypes"
-"google.golang.org/genproto/googleapis/cloud/compute/v1"
 computepb "google.golang.org/genproto/googleapis/cloud/compute/v1"
 "google.golang.org/protobuf/proto"
 )
@ -208,11 +207,13 @@ func (c *Client) CreateLoadBalancer(ctx context.Context) error {
 Region: c.region,
 HealthCheckResource: &computepb.HealthCheck{
 Name: proto.String(c.healthCheck),
-Type: proto.String(compute.HealthCheck_Type_name[int32(compute.HealthCheck_TCP)]),
+Type: proto.String(computepb.HealthCheck_Type_name[int32(computepb.HealthCheck_HTTPS)]),
 CheckIntervalSec: proto.Int32(1),
 TimeoutSec: proto.Int32(1),
-TcpHealthCheck: &computepb.TCPHealthCheck{
+HttpsHealthCheck: &computepb.HTTPSHealthCheck{
+Host: proto.String(""),
 Port: proto.Int32(6443),
+RequestPath: proto.String("/readyz"),
 },
 },
 })
|
|||||||
Region: c.region,
|
Region: c.region,
|
||||||
BackendServiceResource: &computepb.BackendService{
|
BackendServiceResource: &computepb.BackendService{
|
||||||
Name: proto.String(c.backendService),
|
Name: proto.String(c.backendService),
|
||||||
Protocol: proto.String(compute.BackendService_Protocol_name[int32(compute.BackendService_TCP)]),
|
Protocol: proto.String(computepb.BackendService_Protocol_name[int32(computepb.BackendService_TCP)]),
|
||||||
LoadBalancingScheme: proto.String(computepb.BackendService_LoadBalancingScheme_name[int32(compute.BackendService_EXTERNAL)]),
|
LoadBalancingScheme: proto.String(computepb.BackendService_LoadBalancingScheme_name[int32(computepb.BackendService_EXTERNAL)]),
|
||||||
TimeoutSec: proto.Int32(10),
|
TimeoutSec: proto.Int32(10),
|
||||||
HealthChecks: []string{"https://www.googleapis.com/compute/v1/projects/" + c.project + "/regions/" + c.region + "/healthChecks/" + c.healthCheck},
|
HealthChecks: []string{"https://www.googleapis.com/compute/v1/projects/" + c.project + "/regions/" + c.region + "/healthChecks/" + c.healthCheck},
|
||||||
Backends: []*computepb.Backend{
|
Backends: []*computepb.Backend{
|
||||||
{
|
{
|
||||||
BalancingMode: proto.String(computepb.Backend_BalancingMode_name[int32(compute.Backend_CONNECTION)]),
|
BalancingMode: proto.String(computepb.Backend_BalancingMode_name[int32(computepb.Backend_CONNECTION)]),
|
||||||
Group: proto.String("https://www.googleapis.com/compute/v1/projects/" + c.project + "/zones/" + c.zone + "/instanceGroups/" + c.controlPlaneInstanceGroup),
|
Group: proto.String("https://www.googleapis.com/compute/v1/projects/" + c.project + "/zones/" + c.zone + "/instanceGroups/" + c.controlPlaneInstanceGroup),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -254,8 +255,8 @@ func (c *Client) CreateLoadBalancer(ctx context.Context) error {
|
|||||||
Region: c.region,
|
Region: c.region,
|
||||||
ForwardingRuleResource: &computepb.ForwardingRule{
|
ForwardingRuleResource: &computepb.ForwardingRule{
|
||||||
Name: proto.String(c.forwardingRule),
|
Name: proto.String(c.forwardingRule),
|
||||||
IPProtocol: proto.String(compute.ForwardingRule_IPProtocolEnum_name[int32(compute.ForwardingRule_TCP)]),
|
IPProtocol: proto.String(computepb.ForwardingRule_IPProtocolEnum_name[int32(computepb.ForwardingRule_TCP)]),
|
||||||
LoadBalancingScheme: proto.String(compute.ForwardingRule_LoadBalancingScheme_name[int32(compute.ForwardingRule_EXTERNAL)]),
|
LoadBalancingScheme: proto.String(computepb.ForwardingRule_LoadBalancingScheme_name[int32(computepb.ForwardingRule_EXTERNAL)]),
|
||||||
Ports: []string{"6443", "9000"},
|
Ports: []string{"6443", "9000"},
|
||||||
BackendService: proto.String("https://www.googleapis.com/compute/v1/projects/" + c.project + "/regions/" + c.region + "/backendServices/" + c.backendService),
|
BackendService: proto.String("https://www.googleapis.com/compute/v1/projects/" + c.project + "/regions/" + c.region + "/backendServices/" + c.backendService),
|
||||||
},
|
},
|
||||||
@ -295,7 +296,7 @@ func (c *Client) CreateLoadBalancer(ctx context.Context) error {
|
|||||||
return c.waitForOperations(ctx, []Operation{resp})
|
return c.waitForOperations(ctx, []Operation{resp})
|
||||||
}
|
}
|
||||||
|
|
||||||
// TerminteLoadBalancer removes the load balancer and its associated resources.
|
// TerminateLoadBalancer removes the load balancer and its associated resources.
|
||||||
func (c *Client) TerminateLoadBalancer(ctx context.Context) error {
|
func (c *Client) TerminateLoadBalancer(ctx context.Context) error {
|
||||||
resp, err := c.forwardingRulesAPI.Delete(ctx, &computepb.DeleteForwardingRuleRequest{
|
resp, err := c.forwardingRulesAPI.Delete(ctx, &computepb.DeleteForwardingRuleRequest{
|
||||||
Project: c.project,
|
Project: c.project,
|
||||||
|
@ -187,6 +187,17 @@ func getIPsFromConfig(stat statec.ConstellationState, config configc.Config) ([]
|
|||||||
ips = append(ips, ip)
|
ips = append(ips, ip)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// add bootstrapper IP if it is not already in the list
|
||||||
|
var foundBootstrapperIP bool
|
||||||
|
for _, ip := range ips {
|
||||||
|
if ip == stat.BootstrapperHost {
|
||||||
|
foundBootstrapperIP = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !foundBootstrapperIP && stat.BootstrapperHost != "" {
|
||||||
|
ips = append(ips, stat.BootstrapperHost)
|
||||||
|
}
|
||||||
if len(ips) == 0 {
|
if len(ips) == 0 {
|
||||||
return nil, fmt.Errorf("no public IPs found in statefile")
|
return nil, fmt.Errorf("no public IPs found in statefile")
|
||||||
}
|
}
|
||||||
|
@ -18,6 +18,8 @@ Wants=network-online.target
|
|||||||
After=network-online.target
|
After=network-online.target
|
||||||
[Service]
|
[Service]
|
||||||
Type=simple
|
Type=simple
|
||||||
|
RemainAfterExit=yes
|
||||||
|
Restart=on-failure
|
||||||
EnvironmentFile=/etc/constellation.env
|
EnvironmentFile=/etc/constellation.env
|
||||||
ExecStartPre=-setenforce Permissive
|
ExecStartPre=-setenforce Permissive
|
||||||
ExecStartPre=/usr/bin/mkdir -p /opt/cni/bin/
|
ExecStartPre=/usr/bin/mkdir -p /opt/cni/bin/
|
||||||
|
@ -216,27 +216,27 @@ type fakeDbusConn struct {
|
|||||||
actionErr error
|
actionErr error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *fakeDbusConn) StartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
|
func (c *fakeDbusConn) StartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
|
||||||
f.inputs = append(f.inputs, dbusConnActionInput{name: name, mode: mode})
|
c.inputs = append(c.inputs, dbusConnActionInput{name: name, mode: mode})
|
||||||
ch <- f.result
|
ch <- c.result
|
||||||
|
|
||||||
return f.jobID, f.actionErr
|
return c.jobID, c.actionErr
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *fakeDbusConn) StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
|
func (c *fakeDbusConn) StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
|
||||||
f.inputs = append(f.inputs, dbusConnActionInput{name: name, mode: mode})
|
c.inputs = append(c.inputs, dbusConnActionInput{name: name, mode: mode})
|
||||||
ch <- f.result
|
ch <- c.result
|
||||||
|
|
||||||
return f.jobID, f.actionErr
|
return c.jobID, c.actionErr
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *fakeDbusConn) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
|
func (c *fakeDbusConn) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
|
||||||
f.inputs = append(f.inputs, dbusConnActionInput{name: name, mode: mode})
|
c.inputs = append(c.inputs, dbusConnActionInput{name: name, mode: mode})
|
||||||
ch <- f.result
|
ch <- c.result
|
||||||
|
|
||||||
return f.jobID, f.actionErr
|
return c.jobID, c.actionErr
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *fakeDbusConn) ReloadContext(ctx context.Context) error {
|
func (c *fakeDbusConn) ReloadContext(ctx context.Context) error {
|
||||||
return s.actionErr
|
return c.actionErr
|
||||||
}
|
}
|
||||||
|
@ -41,6 +41,7 @@ func transformState(tfOut terraformOutput) state.ConstellationState {
|
|||||||
Name: "qemu",
|
Name: "qemu",
|
||||||
UID: "debug",
|
UID: "debug",
|
||||||
CloudProvider: "qemu",
|
CloudProvider: "qemu",
|
||||||
|
BootstrapperHost: tfOut.ControlPlaneIPs.Value[0],
|
||||||
QEMUWorkers: cloudtypes.Instances{},
|
QEMUWorkers: cloudtypes.Instances{},
|
||||||
QEMUControlPlane: cloudtypes.Instances{},
|
QEMUControlPlane: cloudtypes.Instances{},
|
||||||
}
|
}
|
||||||
|
@ -29,7 +29,7 @@ func TestGetGCEInstanceInfo(t *testing.T) {
|
|||||||
projectIDString: "projectID",
|
projectIDString: "projectID",
|
||||||
instanceNameString: "instanceName",
|
instanceNameString: "instanceName",
|
||||||
zoneString: "zone",
|
zoneString: "zone",
|
||||||
projecIdErr: errors.New("error"),
|
projecIDErr: errors.New("error"),
|
||||||
},
|
},
|
||||||
wantErr: true,
|
wantErr: true,
|
||||||
},
|
},
|
||||||
@ -78,13 +78,13 @@ type fakeMetadataClient struct {
|
|||||||
projectIDString string
|
projectIDString string
|
||||||
instanceNameString string
|
instanceNameString string
|
||||||
zoneString string
|
zoneString string
|
||||||
projecIdErr error
|
projecIDErr error
|
||||||
instanceNameErr error
|
instanceNameErr error
|
||||||
zoneErr error
|
zoneErr error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c fakeMetadataClient) projectID() (string, error) {
|
func (c fakeMetadataClient) projectID() (string, error) {
|
||||||
return c.projectIDString, c.projecIdErr
|
return c.projectIDString, c.projecIDErr
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c fakeMetadataClient) instanceName() (string, error) {
|
func (c fakeMetadataClient) instanceName() (string, error) {
|
||||||
|
@ -10,7 +10,7 @@ import (
|
|||||||
"github.com/edgelesssys/constellation/internal/constants"
|
"github.com/edgelesssys/constellation/internal/constants"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Instance describes metadata of a peer.
|
// InstanceMetadata describes metadata of a peer.
|
||||||
type InstanceMetadata struct {
|
type InstanceMetadata struct {
|
||||||
Name string
|
Name string
|
||||||
ProviderID string
|
ProviderID string
|
||||||
@ -32,6 +32,7 @@ type InstanceLister interface {
|
|||||||
List(ctx context.Context) ([]InstanceMetadata, error)
|
List(ctx context.Context) ([]InstanceMetadata, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// InitServerEndpoints returns the list of endpoints for the init server, which are running on the control plane nodes.
|
||||||
func InitServerEndpoints(ctx context.Context, lister InstanceLister) ([]string, error) {
|
func InitServerEndpoints(ctx context.Context, lister InstanceLister) ([]string, error) {
|
||||||
instances, err := lister.List(ctx)
|
instances, err := lister.List(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -88,7 +88,7 @@ func New(logType LogType, logLevel zapcore.Level) *Logger {
|
|||||||
return &Logger{logger: logger.Sugar()}
|
return &Logger{logger: logger.Sugar()}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewTestLogger creates a logger for unit / integration tests.
|
// NewTest creates a logger for unit / integration tests.
|
||||||
func NewTest(t *testing.T) *Logger {
|
func NewTest(t *testing.T) *Logger {
|
||||||
return &Logger{
|
return &Logger{
|
||||||
logger: zaptest.NewLogger(t).Sugar().Named(fmt.Sprintf("%q", t.Name())),
|
logger: zaptest.NewLogger(t).Sugar().Named(fmt.Sprintf("%q", t.Name())),
|
||||||
|
@ -9,6 +9,7 @@ type ConstellationState struct {
|
|||||||
Name string `json:"name,omitempty"`
|
Name string `json:"name,omitempty"`
|
||||||
UID string `json:"uid,omitempty"`
|
UID string `json:"uid,omitempty"`
|
||||||
CloudProvider string `json:"cloudprovider,omitempty"`
|
CloudProvider string `json:"cloudprovider,omitempty"`
|
||||||
|
BootstrapperHost string `json:"bootstrapperhost,omitempty"`
|
||||||
|
|
||||||
GCPWorkers cloudtypes.Instances `json:"gcpworkers,omitempty"`
|
GCPWorkers cloudtypes.Instances `json:"gcpworkers,omitempty"`
|
||||||
GCPControlPlanes cloudtypes.Instances `json:"gcpcontrolplanes,omitempty"`
|
GCPControlPlanes cloudtypes.Instances `json:"gcpcontrolplanes,omitempty"`
|
||||||
|
@ -50,7 +50,7 @@ func main() {
|
|||||||
|
|
||||||
creds := atlscredentials.New(nil, []atls.Validator{validator})
|
creds := atlscredentials.New(nil, []atls.Validator{validator})
|
||||||
|
|
||||||
vpcIP, err := getIPinVPC(ctx, *provider)
|
vpcIP, err := getVPCIP(ctx, *provider)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.With(zap.Error(err)).Fatalf("Failed to get IP in VPC")
|
log.With(zap.Error(err)).Fatalf("Failed to get IP in VPC")
|
||||||
}
|
}
|
||||||
@ -87,7 +87,7 @@ func main() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func getIPinVPC(ctx context.Context, provider string) (string, error) {
|
func getVPCIP(ctx context.Context, provider string) (string, error) {
|
||||||
switch cloudprovider.FromString(provider) {
|
switch cloudprovider.FromString(provider) {
|
||||||
case cloudprovider.Azure:
|
case cloudprovider.Azure:
|
||||||
metadata, err := azurecloud.NewMetadata(ctx)
|
metadata, err := azurecloud.NewMetadata(ctx)
|
||||||
|
@ -27,7 +27,7 @@ func New(log *logger.Logger, endpoint string) Client {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetDEK returns a data encryption key for the given UUID.
|
// GetDataKey returns a data encryption key for the given UUID.
|
||||||
func (c Client) GetDataKey(ctx context.Context, uuid string, length int) ([]byte, error) {
|
func (c Client) GetDataKey(ctx context.Context, uuid string, length int) ([]byte, error) {
|
||||||
log := c.log.With(zap.String("diskUUID", uuid), zap.String("endpoint", c.endpoint))
|
log := c.log.With(zap.String("diskUUID", uuid), zap.String("endpoint", c.endpoint))
|
||||||
// TODO: update credentials if we enable aTLS on the KMS
|
// TODO: update credentials if we enable aTLS on the KMS
|
||||||
|
@ -5,6 +5,10 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/edgelesssys/constellation/internal/constants"
|
||||||
|
"github.com/edgelesssys/constellation/internal/logger"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
|
||||||
"k8s.io/kubernetes/cmd/kubeadm/app/phases/copycerts"
|
"k8s.io/kubernetes/cmd/kubeadm/app/phases/copycerts"
|
||||||
"k8s.io/utils/clock"
|
"k8s.io/utils/clock"
|
||||||
)
|
)
|
||||||
@ -18,11 +22,15 @@ type keyManager struct {
|
|||||||
key string
|
key string
|
||||||
expirationDate time.Time
|
expirationDate time.Time
|
||||||
clock clock.Clock
|
clock clock.Clock
|
||||||
|
client clientset.Interface
|
||||||
|
log *logger.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func newKeyManager() *keyManager {
|
func newKeyManager(client clientset.Interface, log *logger.Logger) *keyManager {
|
||||||
return &keyManager{
|
return &keyManager{
|
||||||
clock: clock.RealClock{},
|
clock: clock.RealClock{},
|
||||||
|
client: client,
|
||||||
|
log: log,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -45,6 +53,15 @@ func (k *keyManager) getCertificatetKey() (string, error) {
|
|||||||
}
|
}
|
||||||
k.expirationDate = k.clock.Now().Add(certificateKeyTTL)
|
k.expirationDate = k.clock.Now().Add(certificateKeyTTL)
|
||||||
k.key = key
|
k.key = key
|
||||||
|
k.log.Infof("Uploading certs to Kubernetes")
|
||||||
|
cfg := &kubeadmapi.InitConfiguration{
|
||||||
|
ClusterConfiguration: kubeadmapi.ClusterConfiguration{
|
||||||
|
CertificatesDir: constants.KubeadmCertificateDir,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if err := copycerts.UploadCerts(k.client, cfg, key); err != nil {
|
||||||
|
return "", fmt.Errorf("uploading certs: %w", err)
|
||||||
|
}
|
||||||
case k.expirationDate.After(k.clock.Now()):
|
case k.expirationDate.After(k.clock.Now()):
|
||||||
// key is still valid
|
// key is still valid
|
||||||
// if TTL is less than 2 minutes away, increase it by 2 minutes
|
// if TTL is less than 2 minutes away, increase it by 2 minutes
|
||||||
|
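A reading note on the keyManager hunk above: the certificate upload now lives inside getCertificatetKey and runs only on the branch that generates a fresh key, so callers that receive the cached, still-valid key no longer trigger a second upload. The sketch below reproduces that generate-once-then-reuse pattern with simplified stand-ins (the generate and upload fields are placeholders for the kubeadm helpers used by the real code) and omits the TTL-extension behavior mentioned in the diff's comments; it is an illustration, not the actual keyManager.

package main

import (
	"fmt"
	"sync"
	"time"
)

// certKeyCache mimics upload-only-on-rotation: upload runs once per generated
// key, and cached keys are handed out until their TTL expires.
type certKeyCache struct {
	mu      sync.Mutex
	key     string
	expires time.Time
	ttl     time.Duration

	generate func() (string, error) // placeholder for the kubeadm key generation helper
	upload   func(key string) error // placeholder for the kubeadm cert upload helper
}

func (c *certKeyCache) get() (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.key != "" && time.Now().Before(c.expires) {
		return c.key, nil // reuse: no second upload for the same key
	}

	key, err := c.generate()
	if err != nil {
		return "", err
	}
	if err := c.upload(key); err != nil {
		return "", fmt.Errorf("uploading certs: %w", err)
	}
	c.key = key
	c.expires = time.Now().Add(c.ttl)
	return key, nil
}

func main() {
	cache := &certKeyCache{
		ttl:      2 * time.Minute,
		generate: func() (string, error) { return "example-key", nil },
		upload:   func(string) error { fmt.Println("uploading certs"); return nil },
	}
	k1, _ := cache.get()
	k2, _ := cache.get() // second call reuses the key, upload is not repeated
	fmt.Println(k1 == k2)
}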
@ -4,7 +4,12 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/edgelesssys/constellation/internal/logger"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
|
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||||
|
fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake"
|
||||||
"k8s.io/utils/clock"
|
"k8s.io/utils/clock"
|
||||||
testclock "k8s.io/utils/clock/testing"
|
testclock "k8s.io/utils/clock/testing"
|
||||||
)
|
)
|
||||||
@ -12,30 +17,42 @@ import (
|
|||||||
func TestKeyManager(t *testing.T) {
|
func TestKeyManager(t *testing.T) {
|
||||||
testCases := map[string]struct {
|
testCases := map[string]struct {
|
||||||
clock clock.Clock
|
clock clock.Clock
|
||||||
|
client clientset.Interface
|
||||||
ttl time.Time
|
ttl time.Time
|
||||||
key string
|
key string
|
||||||
shouldReuse bool
|
shouldReuse bool
|
||||||
|
wantErr bool
|
||||||
}{
|
}{
|
||||||
"no key exists": {
|
"no key exists": {
|
||||||
clock: testclock.NewFakeClock(time.Time{}),
|
clock: testclock.NewFakeClock(time.Time{}),
|
||||||
|
client: fake.NewSimpleClientset(),
|
||||||
},
|
},
|
||||||
"key exists and is valid": {
|
"key exists and is valid": {
|
||||||
clock: testclock.NewFakeClock(time.Time{}),
|
clock: testclock.NewFakeClock(time.Time{}),
|
||||||
|
client: fake.NewSimpleClientset(),
|
||||||
ttl: time.Time{}.Add(time.Hour),
|
ttl: time.Time{}.Add(time.Hour),
|
||||||
key: "key",
|
key: "key",
|
||||||
shouldReuse: true,
|
shouldReuse: true,
|
||||||
},
|
},
|
||||||
"key has expired": {
|
"key has expired": {
|
||||||
clock: testclock.NewFakeClock(time.Time{}.Add(time.Hour)),
|
clock: testclock.NewFakeClock(time.Time{}.Add(time.Hour)),
|
||||||
|
client: fake.NewSimpleClientset(),
|
||||||
ttl: time.Time{},
|
ttl: time.Time{},
|
||||||
key: "key",
|
key: "key",
|
||||||
},
|
},
|
||||||
"key expires in the next 30 seconds": {
|
"key expires in the next 30 seconds": {
|
||||||
clock: testclock.NewFakeClock(time.Time{}),
|
clock: testclock.NewFakeClock(time.Time{}),
|
||||||
|
client: fake.NewSimpleClientset(),
|
||||||
ttl: time.Time{}.Add(30 * time.Second),
|
ttl: time.Time{}.Add(30 * time.Second),
|
||||||
key: "key",
|
key: "key",
|
||||||
shouldReuse: true,
|
shouldReuse: true,
|
||||||
},
|
},
|
||||||
|
"uploading certs fails": {
|
||||||
|
clock: testclock.NewFakeClock(time.Time{}),
|
||||||
|
client: &failingClient{
|
||||||
|
fake.NewSimpleClientset(),
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for name, tc := range testCases {
|
for name, tc := range testCases {
|
||||||
@ -46,9 +63,15 @@ func TestKeyManager(t *testing.T) {
|
|||||||
expirationDate: tc.ttl,
|
expirationDate: tc.ttl,
|
||||||
key: tc.key,
|
key: tc.key,
|
||||||
clock: tc.clock,
|
clock: tc.clock,
|
||||||
|
log: logger.NewTest(t),
|
||||||
|
client: fake.NewSimpleClientset(),
|
||||||
}
|
}
|
||||||
|
|
||||||
key, err := km.getCertificatetKey()
|
key, err := km.getCertificatetKey()
|
||||||
|
if tc.wantErr {
|
||||||
|
assert.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
assert.NoError(err)
|
assert.NoError(err)
|
||||||
assert.True(km.expirationDate.After(tc.clock.Now().Add(2 * time.Minute)))
|
assert.True(km.expirationDate.After(tc.clock.Now().Add(2 * time.Minute)))
|
||||||
|
|
||||||
@ -61,3 +84,13 @@ func TestKeyManager(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type failingClient struct {
|
||||||
|
*fake.Clientset
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *failingClient) CoreV1() corev1.CoreV1Interface {
|
||||||
|
return &failingCoreV1{
|
||||||
|
&fakecorev1.FakeCoreV1{Fake: &f.Clientset.Fake},
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -16,10 +16,8 @@ import (
 	certutil "k8s.io/client-go/util/cert"
 	bootstraputil "k8s.io/cluster-bootstrap/token/util"
 	bootstraptoken "k8s.io/kubernetes/cmd/kubeadm/app/apis/bootstraptoken/v1"
-	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
 	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 	tokenphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
-	"k8s.io/kubernetes/cmd/kubeadm/app/phases/copycerts"
 	"k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
 	"k8s.io/kubernetes/cmd/kubeadm/app/util/pubkeypin"
 )
@@ -48,7 +46,7 @@ func New(apiServerEndpoint string, log *logger.Logger) (*Kubeadm, error) {
 	return &Kubeadm{
 		apiServerEndpoint: apiServerEndpoint,
 		log:               log,
-		keyManager:        newKeyManager(),
+		keyManager:        newKeyManager(client, log),
 		client:            client,
 		file:              file,
 	}, nil
@@ -113,21 +111,10 @@ func (k *Kubeadm) GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscov
 // GetControlPlaneCertificateKey uploads Kubernetes encrypted CA certificates to Kubernetes and returns the decryption key.
 // The key can be used by new nodes to join the cluster as a control plane node.
 func (k *Kubeadm) GetControlPlaneCertificateKey() (string, error) {
-	k.log.Infof("Creating new random control plane certificate key")
+	k.log.Infof("Creating new random control plane certificate key (or returning cached key)")
 	key, err := k.keyManager.getCertificatetKey()
 	if err != nil {
 		return "", fmt.Errorf("couldn't create control plane certificate key: %w", err)
 	}
-
-	k.log.Infof("Uploading certs to Kubernetes")
-	cfg := &kubeadmapi.InitConfiguration{
-		ClusterConfiguration: kubeadmapi.ClusterConfiguration{
-			CertificatesDir: constants.KubeadmCertificateDir,
-		},
-	}
-	if err := copycerts.UploadCerts(k.client, cfg, key); err != nil {
-		return "", fmt.Errorf("uploading certs: %w", err)
-	}
-
 	return key, nil
 }
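Taken together, these kubeadm.go hunks move the cert upload out of GetControlPlaneCertificateKey and into the key manager, so the encrypted kubeadm certs are only re-uploaded when a new certificate key is actually generated, matching the commit title. As a rough illustration of that caching idea, here is a minimal, hypothetical sketch; it is not the repository's keyManager, and the keyRotator name, the clock.Clock dependency, the certDir parameter (standing in for constants.KubeadmCertificateDir), and the 2-hour expiry window are assumptions.

// Hypothetical sketch only: illustrates "upload certs only when the key rotates".
package kubeadm // assumed package name for illustration

import (
	"fmt"
	"sync"
	"time"

	clientset "k8s.io/client-go/kubernetes"
	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	"k8s.io/kubernetes/cmd/kubeadm/app/phases/copycerts"
	"k8s.io/utils/clock"
)

type keyRotator struct {
	mux            sync.Mutex
	key            string
	expirationDate time.Time
	clock          clock.Clock
	client         clientset.Interface
}

// getKey reuses the cached certificate key while it stays valid for at least
// two more minutes; otherwise it creates a fresh key and re-uploads the
// encrypted kubeadm certs, so joining control-plane nodes always receive a
// key that still matches an uploaded kubeadm-certs Secret.
func (k *keyRotator) getKey(certDir string) (string, error) {
	k.mux.Lock()
	defer k.mux.Unlock()

	if k.key != "" && k.expirationDate.After(k.clock.Now().Add(2*time.Minute)) {
		return k.key, nil // still valid: no new upload needed
	}

	key, err := copycerts.CreateCertificateKey()
	if err != nil {
		return "", fmt.Errorf("creating certificate key: %w", err)
	}
	cfg := &kubeadmapi.InitConfiguration{
		ClusterConfiguration: kubeadmapi.ClusterConfiguration{CertificatesDir: certDir},
	}
	if err := copycerts.UploadCerts(k.client, cfg, key); err != nil {
		return "", fmt.Errorf("uploading certs: %w", err)
	}

	// kubeadm removes the uploaded kubeadm-certs Secret after about two hours,
	// so the cached key is treated as expired before that window closes (assumption).
	k.key = key
	k.expirationDate = k.clock.Now().Add(2 * time.Hour)
	return key, nil
}

With this shape, a caller like GetControlPlaneCertificateKey only logs and forwards the key, and a test can inject a fake clock plus a failing client to exercise both the reuse path and the upload-error path.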
@@ -14,7 +14,6 @@ import (
 	"go.uber.org/goleak"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
 	fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake"
@@ -104,53 +103,6 @@ kind: Config`,
 	}
 }

-func TestGetControlPlaneCertificateKey(t *testing.T) {
-	testCases := map[string]struct {
-		wantErr bool
-		client  clientset.Interface
-	}{
-		"success": {
-			client:  fake.NewSimpleClientset(),
-			wantErr: false,
-		},
-		"failure": {
-			client: &failingClient{
-				fake.NewSimpleClientset(),
-			},
-			wantErr: true,
-		},
-	}
-
-	for name, tc := range testCases {
-		t.Run(name, func(t *testing.T) {
-			assert := assert.New(t)
-
-			client := &Kubeadm{
-				keyManager: &keyManager{clock: testclock.NewFakeClock(time.Time{})},
-				log:        logger.NewTest(t),
-				client:     tc.client,
-			}
-
-			_, err := client.GetControlPlaneCertificateKey()
-			if tc.wantErr {
-				assert.Error(err)
-			} else {
-				assert.NoError(err)
-			}
-		})
-	}
-}
-
-type failingClient struct {
-	*fake.Clientset
-}
-
-func (f *failingClient) CoreV1() corev1.CoreV1Interface {
-	return &failingCoreV1{
-		&fakecorev1.FakeCoreV1{Fake: &f.Clientset.Fake},
-	}
-}
-
 type failingCoreV1 struct {
 	*fakecorev1.FakeCoreV1
 }
@@ -29,7 +29,7 @@ type cryptoClientAPI interface {
 	WrapKey(ctx context.Context, alg crypto.KeyWrapAlgorithm, key []byte, options *crypto.WrapKeyOptions) (crypto.WrapKeyResponse, error)
 }

-// Suffix for HSM Vaults.
+// HSMDefaultCloud is the suffix for HSM Vaults.
 const HSMDefaultCloud VaultSuffix = ".managedhsm.azure.net/"

 // HSMClient implements the CloudKMS interface for Azure managed HSM.
@@ -28,8 +28,8 @@ const (
 )

 type KMSInformation struct {
-	KmsUri             string
-	StorageUri         string
+	KMSURI             string
+	StorageURI         string
 	KeyEncryptionKeyID string
 }

@@ -187,7 +187,7 @@ func TestGetGCPKMSConfig(t *testing.T) {
 }

 func TestGetConfig(t *testing.T) {
-	const testUri = "test://config?name=test-name&data=test-data&value=test-value"
+	const testURI = "test://config?name=test-name&data=test-data&value=test-value"

 	testCases := map[string]struct {
 		uri     string
@@ -195,17 +195,17 @@ func TestGetConfig(t *testing.T) {
 		wantErr bool
 	}{
 		"success": {
-			uri:     testUri,
+			uri:     testURI,
 			keys:    []string{"name", "data", "value"},
 			wantErr: false,
 		},
 		"less keys than capture groups": {
-			uri:     testUri,
+			uri:     testURI,
 			keys:    []string{"name", "data"},
 			wantErr: false,
 		},
 		"invalid regex": {
-			uri:     testUri,
+			uri:     testURI,
 			keys:    []string{"name", "data", "test-value"},
 			wantErr: true,
 		},
@@ -215,7 +215,7 @@ func TestGetConfig(t *testing.T) {
 			wantErr: true,
 		},
 		"more keys than expected": {
-			uri:     testUri,
+			uri:     testURI,
 			keys:    []string{"name", "data", "value", "anotherValue"},
 			wantErr: true,
 		},
@@ -84,7 +84,7 @@ type DeviceMapper interface {
 	Resize(name string, newSize uint64) error
 }

-// cryptDevice is a wrapper for cryptsetup.Device.
+// CryptDevice is a wrapper for cryptsetup.Device.
 type CryptDevice struct {
 	*cryptsetup.Device
 }
@@ -192,7 +192,7 @@ func (c *CryptMapper) ResizeCryptDevice(ctx context.Context, volumeID string) (s
 	return cryptPrefix + volumeID, nil
 }

-// GetDeviceName returns the real device name of a mapped crypt device.
+// GetDevicePath returns the device path of a mapped crypt device.
 func (c *CryptMapper) GetDevicePath(volumeID string) (string, error) {
 	return getDevicePath(c.mapper, strings.TrimPrefix(volumeID, cryptPrefix))
 }
@@ -44,7 +44,7 @@ func New(log *logger.Logger, issuer QuoteIssuer, metadata metadata.InstanceListe
 	}
 }

-// PushStateDiskKeyRequest is the rpc to push state disk decryption keys to a restarting node.
+// PushStateDiskKey is the rpc to push state disk decryption keys to a restarting node.
 func (a *KeyAPI) PushStateDiskKey(ctx context.Context, in *keyproto.PushStateDiskKeyRequest) (*keyproto.PushStateDiskKeyResponse, error) {
 	a.mux.Lock()
 	defer a.mux.Unlock()
@@ -35,7 +35,7 @@ func (m *Mapper) IsLUKSDevice() bool {
 	return m.device.Load(cryptsetup.LUKS2{}) == nil
 }

-// GetUUID gets the device's UUID.
+// DiskUUID gets the device's UUID.
 func (m *Mapper) DiskUUID() string {
 	return strings.ToLower(m.device.GetUUID())
 }
@@ -25,7 +25,7 @@ provider "docker" {
 }

 resource "docker_image" "qemu-metadata" {
-  name         = "ghcr.io/edgelesssys/constellation/qemu-metadata-api:latest"
+  name         = "ghcr.io/edgelesssys/constellation/qemu-metadata-api:feat-coordinator-selfactivation-node"
   keep_locally = true
 }
