Mirror of https://github.com/edgelesssys/constellation.git (synced 2024-10-01 01:36:09 -04:00)

Commit 66b573ea5d — Bootstrapper
Parent: 1af18e990d
@@ -3,9 +3,6 @@ description: "Destroy a running Constellation cluster."
 runs:
   using: 'composite'
   steps:
-    - name: Remove VPN configuration
-      run: wg-quick down ./wg0.conf
-      shell: bash
     - name: Constellation terminate
      run: constellation terminate
      shell: bash
@@ -57,7 +57,7 @@ jobs:

   call-coreos:
     needs: build-bootstrapper
-    if: ${{ (github.ref == 'refs/heads/main') && startsWith(needs.build-bootstrapper.outputs.bootstrapper-name, 'bootstrapper-')
+    if: ${{ (github.ref == 'refs/heads/main') && startsWith(needs.build-bootstrapper.outputs.bootstrapper-name, 'bootstrapper-') }}
     uses: ./.github/workflows/build-coreos.yml
     with:
       bootstrapper-name: ${{ needs.build-bootstrapper.outputs.bootstrapper-name }}
@@ -22,10 +22,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ### Changed

+- Nodes add themselves to the cluster after `constellation init` is done
+
+### Deprecated
+
 ### Removed

+- User facing WireGuard VPN
+
 ### Fixed

 ### Security
@@ -65,3 +65,19 @@ the `<REPOSITORY>/.vscode/settings.json` repo, so the settings will only affect
   // tests.
   "go.buildTags": "integration",
 ```
+
+## Naming convention
+
+### Network
+
+IP addresses:
+
+* ip: numeric IP address
+* host: either IP address or hostname
+* endpoint: host+port
+
+### Keys
+
+* key: symmetric key
+* pubKey: public key
+* privKey: private key
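The endpoint convention above maps directly onto Go's net package; a short illustrative sketch (not part of the commit, names and values are placeholders):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	ip := "192.0.2.1"         // ip: numeric IP address
	host := "node-0.internal" // host: either IP address or hostname

	// endpoint: host+port, assembled and split with the net package.
	endpoint := net.JoinHostPort(host, "6443")
	h, p, err := net.SplitHostPort(endpoint)
	if err != nil {
		panic(err)
	}
	fmt.Println(ip, endpoint, h, p) // 192.0.2.1 node-0.internal:6443 node-0.internal 6443
}
```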
@@ -1,19 +1,40 @@
 # Bootstrapper
 
-## Naming convention
+The bootstrapper integrates the instance it is running on as a node into the Kubernetes
+cluster. It runs on every new instance that is created.
 
-### Network
+![bootstrapper architecture](./bootstrapping_arch.svg)
 
-IP addresses:
+The bootstrapper has two active components:
 
-* ip: numeric IP address
-* host: either IP address or hostname
-* endpoint: host+port
+## Init Flow
 
-### Keys
+The InitServer is a gRPC server that listens for initialization requests.
+The first instance needs to be initialized by the user; see the [initproto](./initproto)
+package for a description of the initialization protocol. The client that talks to this
+server is part of Constellation's CLI.
 
-Kinds:
+On an initialization request, the InitServer initializes a new Kubernetes cluster, essentially
+calling the InitCluster function of our Kubernetes library, which does a `kubeadm init`.
 
-* key: symmetric key
-* pubKey: public key
-* privKey: private key
+## Join Flow
+
+The JoinClient is a gRPC client that tries to connect to a JoinService, which might be running
+in an already existing cluster as a DaemonSet. If the JoinClient can connect to the JoinService,
+it tries to issue a join ticket. The JoinService validates the instance that wants to join the
+cluster using aTLS. For details on the protocol used and the verification of a joining instance's
+measurements, see the [joinservice](./../joinservice) package.
+
+If the JoinService successfully verifies the instance, it issues a join ticket. The JoinClient then
+joins the cluster by calling the `kubeadm join` command, using the token and other needed information
+from the join ticket.
+
+## Synchronization, state machine, lifetime
+
+The bootstrapper is automatically started on every new instance. Both InitServer and JoinClient are
+started and run in parallel. At some point during either the initialization or the join, a shared
+lock between the two components is acquired. This lock is used as the point of no return. It is a
+state machine with two states (unlocked, locked) and a single transition from unlocked to locked.
+There is no way to unlock the node afterward (see the [nodelock](./internal/nodelock) package).
+
+After the bootstrapping, the bootstrapper is stopped.
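The one-way lock described above is small enough to sketch. The following is a hypothetical illustration of a TryLockOnce-style lock; the real [nodelock](./internal/nodelock) package may differ in detail:

```go
package nodelock

import "sync"

// Lock is a one-way lock: once acquired it can never be released,
// marking the point of no return during bootstrapping.
type Lock struct {
	mu     sync.Mutex
	locked bool
}

// New returns an unlocked Lock.
func New() *Lock { return &Lock{} }

// TryLockOnce returns true for the first caller only.
// Every later call returns false; there is no unlock.
func (l *Lock) TryLockOnce() bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.locked {
		return false
	}
	l.locked = true
	return true
}
```

Whichever of InitServer and JoinClient wins this lock determines the node's fate; the loser backs off, which is why the lock doubles as the synchronization point between the two flows.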
bootstrapper/bootstrapping_arch.svg (new file, 4 lines, 15 KiB; diff suppressed because one or more lines are too long)
@@ -5,16 +5,8 @@ import (
 	"github.com/edgelesssys/constellation/bootstrapper/role"
 	"github.com/stretchr/testify/assert"
-	"go.uber.org/goleak"
 )
 
-func TestMain(m *testing.M) {
-	goleak.VerifyTestMain(m,
-		// https://github.com/census-instrumentation/opencensus-go/issues/1262
-		goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
-	)
-}
-
 func TestExtractRole(t *testing.T) {
 	testCases := map[string]struct {
 		metadata map[string]string
@@ -52,16 +52,16 @@ func main() {
 	flag.Parse()
 	cfg.Level.SetLevel(zap.DebugLevel)
 
-	zapLogger, err := cfg.Build()
+	logger, err := cfg.Build()
 	if err != nil {
 		log.Fatal(err)
 	}
 	if *logLevelUser {
-		grpc_zap.ReplaceGrpcLoggerV2(zapLogger.Named("gRPC"))
+		grpc_zap.ReplaceGrpcLoggerV2(logger.Named("gRPC"))
 	} else {
-		grpc_zap.ReplaceGrpcLoggerV2(zapLogger.WithOptions(zap.IncreaseLevel(zap.WarnLevel)).Named("gRPC"))
+		grpc_zap.ReplaceGrpcLoggerV2(logger.WithOptions(zap.IncreaseLevel(zap.WarnLevel)).Named("gRPC"))
 	}
-	zapLoggerCore := zapLogger.Named("core")
+	logger = logger.Named("bootstrapper")
 
 	var issuer atls.Issuer
 	var openTPM vtpm.TPMOpenFunc
@@ -174,7 +174,5 @@ func main() {
 
 	fileHandler := file.NewHandler(fs)
 
-	run(issuer, openTPM, fileHandler, clusterInitJoiner,
-		metadataAPI, bindIP,
-		bindPort, zapLoggerCore, cloudLogger, fs)
+	run(issuer, openTPM, fileHandler, clusterInitJoiner, metadataAPI, bindIP, bindPort, logger, cloudLogger)
 }
@@ -11,7 +11,6 @@ import (
 	"github.com/edgelesssys/constellation/internal/file"
 	"github.com/edgelesssys/constellation/internal/grpc/dialer"
 	"github.com/edgelesssys/constellation/internal/oid"
-	"github.com/spf13/afero"
 	"go.uber.org/zap"
 )
@@ -20,12 +19,12 @@ var version = "0.0.0"
 func run(issuer quoteIssuer, tpm vtpm.TPMOpenFunc, fileHandler file.Handler,
 	kube clusterInitJoiner, metadata joinclient.MetadataAPI,
 	bindIP, bindPort string, logger *zap.Logger,
-	cloudLogger logging.CloudLogger, fs afero.Fs,
+	cloudLogger logging.CloudLogger,
 ) {
 	defer logger.Sync()
-	logger.Info("starting bootstrapper", zap.String("version", version))
-
 	defer cloudLogger.Close()
 
+	logger.Info("starting bootstrapper", zap.String("version", version))
 	cloudLogger.Disclose("bootstrapper started running...")
 
 	nodeBootstrapped, err := vtpm.IsNodeBootstrapped(tpm)
@@ -41,17 +40,21 @@ func run(issuer quoteIssuer, tpm vtpm.TPMOpenFunc, fileHandler file.Handler,
 	}
 
 	nodeLock := nodelock.New()
-	initServer := initserver.New(nodeLock, kube, logger)
+	initServer := initserver.New(nodeLock, kube, issuer, fileHandler, logger)
 
 	dialer := dialer.New(issuer, nil, &net.Dialer{})
 	joinClient := joinclient.New(nodeLock, dialer, kube, metadata, logger)
 
 	joinClient.Start()
-	defer joinClient.Stop()
 
 	if err := initServer.Serve(bindIP, bindPort); err != nil {
 		logger.Error("Failed to serve init server", zap.Error(err))
 	}
 
+	joinClient.Stop()
+
 	logger.Info("bootstrapper done")
 	cloudLogger.Disclose("bootstrapper done")
 }
 
 type clusterInitJoiner interface {
@@ -13,9 +13,11 @@ import (
 	"github.com/edgelesssys/constellation/bootstrapper/nodestate"
 	"github.com/edgelesssys/constellation/bootstrapper/role"
 	"github.com/edgelesssys/constellation/bootstrapper/util"
+	"github.com/edgelesssys/constellation/internal/atls"
 	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
 	"github.com/edgelesssys/constellation/internal/constants"
+	"github.com/edgelesssys/constellation/internal/file"
 	"github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
 	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
 	grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
 	grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags"
@@ -41,21 +43,20 @@ type Server struct {
 }
 
 // New creates a new initialization server.
-func New(lock *nodelock.Lock, kube ClusterInitializer, logger *zap.Logger) *Server {
+func New(lock *nodelock.Lock, kube ClusterInitializer, issuer atls.Issuer, fh file.Handler, logger *zap.Logger) *Server {
 	logger = logger.Named("initServer")
 	server := &Server{
 		nodeLock:    lock,
 		disk:        diskencryption.New(),
 		initializer: kube,
+		fileHandler: fh,
 		logger:      logger,
 	}
 
+	creds := atlscredentials.New(issuer, nil)
 	grpcLogger := logger.Named("gRPC")
 	grpcServer := grpc.NewServer(
 		grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
 			grpc_ctxtags.StreamServerInterceptor(),
 			grpc_zap.StreamServerInterceptor(grpcLogger),
 		)),
+		grpc.Creds(creds),
 		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
 			grpc_ctxtags.UnaryServerInterceptor(),
 			grpc_zap.UnaryServerInterceptor(grpcLogger),
@@ -79,6 +80,8 @@ func (s *Server) Serve(ip, port string) error {
 
 // Init initializes the cluster.
 func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initproto.InitResponse, error) {
+	s.logger.Info("Init called")
+
 	if ok := s.nodeLock.TryLockOnce(); !ok {
 		// The join client seems to already have a connection to an
 		// existing join service. At this point, any further call to
@@ -86,6 +89,7 @@ func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initproto.InitResponse, error) {
 		//
 		// The server stops itself after the current call is done.
 		go s.grpcServer.GracefulStop()
+		s.logger.Info("node is already in a join process")
 		return nil, status.Error(codes.FailedPrecondition, "node is already being activated")
 	}
@@ -125,6 +129,7 @@ func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initproto.InitResponse, error) {
 		return nil, status.Errorf(codes.Internal, "initializing cluster: %s", err)
 	}
 
+	s.logger.Info("Init succeeded")
 	return &initproto.InitResponse{
 		Kubeconfig: kubeconfig,
 		OwnerId:    id.Owner,
@@ -27,7 +27,8 @@ func TestMain(m *testing.M) {
 func TestNew(t *testing.T) {
 	assert := assert.New(t)
 
-	server := New(nodelock.New(), &stubClusterInitializer{}, zap.NewNop())
+	fh := file.NewHandler(afero.NewMemMapFs())
+	server := New(nodelock.New(), &stubClusterInitializer{}, nil, fh, zap.NewNop())
 	assert.NotNil(server)
 	assert.NotNil(server.logger)
 	assert.NotNil(server.nodeLock)
@@ -25,8 +25,9 @@ import (
 )
 
 const (
-	interval = 30 * time.Second
-	timeout  = 30 * time.Second
+	interval    = 30 * time.Second
+	timeout     = 30 * time.Second
+	joinTimeout = 5 * time.Minute
 )
 
 // JoinClient is a client for requesting the needed information and
@@ -39,9 +40,10 @@ type JoinClient struct {
 	disk        encryptedDisk
 	fileHandler file.Handler
 
-	timeout  time.Duration
-	interval time.Duration
-	clock    clock.WithTicker
+	timeout     time.Duration
+	joinTimeout time.Duration
+	interval    time.Duration
+	clock       clock.WithTicker
 
 	dialer grpcDialer
 	joiner ClusterJoiner
@@ -57,9 +59,11 @@ type JoinClient struct {
 // New creates a new JoinClient.
 func New(lock *nodelock.Lock, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, log *zap.Logger) *JoinClient {
 	return &JoinClient{
 		nodeLock:    lock,
 		disk:        diskencryption.New(),
 		fileHandler: file.NewHandler(afero.NewOsFs()),
 		timeout:     timeout,
+		joinTimeout: joinTimeout,
 		interval:    interval,
 		clock:       clock.RealClock{},
 		dialer:      dial,
@@ -202,10 +206,13 @@ func (c *JoinClient) join(serviceEndpoint string) error {
 		return fmt.Errorf("issuing join ticket: %w", err)
 	}
 
-	return c.startNodeAndJoin(ctx, ticket)
+	return c.startNodeAndJoin(ticket)
 }
 
-func (c *JoinClient) startNodeAndJoin(ctx context.Context, ticket *joinproto.IssueJoinTicketResponse) (retErr error) {
+func (c *JoinClient) startNodeAndJoin(ticket *joinproto.IssueJoinTicketResponse) (retErr error) {
+	ctx, cancel := context.WithTimeout(context.Background(), c.joinTimeout)
+	defer cancel()
+
 	// If an error occurs in this func, the client cannot continue.
 	defer func() {
 		if retErr != nil {
@@ -235,7 +242,7 @@ func (c *JoinClient) startNodeAndJoin(ticket *joinproto.IssueJoinTicketResponse) (retErr error) {
 
 	btd := &kubeadm.BootstrapTokenDiscovery{
 		APIServerEndpoint: ticket.ApiServerEndpoint,
-		Token:             ticket.ApiServerEndpoint,
+		Token:             ticket.Token,
 		CACertHashes:      []string{ticket.DiscoveryTokenCaCertHash},
 	}
 	if err := c.joiner.JoinCluster(ctx, btd, ticket.CertificateKey, c.role); err != nil {
@@ -249,12 +256,12 @@ func (c *JoinClient) getNodeMetadata() error {
 	ctx, cancel := c.timeoutCtx()
 	defer cancel()
 
-	c.log.Info("Requesting node metadata from metadata API")
+	c.log.Debug("Requesting node metadata from metadata API")
 	inst, err := c.metadataAPI.Self(ctx)
 	if err != nil {
 		return err
 	}
-	c.log.Info("Received node metadata", zap.Any("instance", inst))
+	c.log.Debug("Received node metadata", zap.Any("instance", inst))
 
 	if inst.Name == "" {
 		return errors.New("got instance metadata with empty name")
@@ -18,7 +18,6 @@ import (
 	"github.com/edgelesssys/constellation/internal/grpc/dialer"
 	"github.com/edgelesssys/constellation/internal/grpc/testdialer"
 	"github.com/edgelesssys/constellation/joinservice/joinproto"
-	activationproto "github.com/edgelesssys/constellation/joinservice/joinproto"
 	"github.com/spf13/afero"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -52,6 +51,8 @@ func TestClient(t *testing.T) {
 		disk          encryptedDisk
 		nodeLock      *nodelock.Lock
 		apiAnswers    []any
+		wantLock      bool
+		wantJoin      bool
 	}{
 		"on worker: metadata self: errors occur": {
 			role: role.Worker,
@@ -66,6 +67,8 @@ func TestClient(t *testing.T) {
 			clusterJoiner: &stubClusterJoiner{},
 			nodeLock:      nodelock.New(),
 			disk:          &stubDisk{},
+			wantJoin:      true,
+			wantLock:      true,
 		},
 		"on worker: metadata self: invalid answer": {
 			role: role.Worker,
@@ -80,6 +83,8 @@ func TestClient(t *testing.T) {
 			clusterJoiner: &stubClusterJoiner{},
 			nodeLock:      nodelock.New(),
 			disk:          &stubDisk{},
+			wantJoin:      true,
+			wantLock:      true,
 		},
 		"on worker: metadata list: errors occur": {
 			role: role.Worker,
@@ -94,6 +99,8 @@ func TestClient(t *testing.T) {
 			clusterJoiner: &stubClusterJoiner{},
 			nodeLock:      nodelock.New(),
 			disk:          &stubDisk{},
+			wantJoin:      true,
+			wantLock:      true,
 		},
 		"on worker: metadata list: no control plane nodes in answer": {
 			role: role.Worker,
@@ -108,6 +115,8 @@ func TestClient(t *testing.T) {
 			clusterJoiner: &stubClusterJoiner{},
 			nodeLock:      nodelock.New(),
 			disk:          &stubDisk{},
+			wantJoin:      true,
+			wantLock:      true,
 		},
 		"on worker: issueJoinTicket errors": {
 			role: role.Worker,
@@ -123,6 +132,8 @@ func TestClient(t *testing.T) {
 			clusterJoiner: &stubClusterJoiner{},
 			nodeLock:      nodelock.New(),
 			disk:          &stubDisk{},
+			wantJoin:      true,
+			wantLock:      true,
 		},
 		"on control plane: issueJoinTicket errors": {
 			role: role.ControlPlane,
@@ -138,6 +149,8 @@ func TestClient(t *testing.T) {
 			clusterJoiner: &stubClusterJoiner{},
 			nodeLock:      nodelock.New(),
 			disk:          &stubDisk{},
+			wantJoin:      true,
+			wantLock:      true,
 		},
 		"on control plane: joinCluster fails": {
 			role: role.ControlPlane,
@@ -149,6 +162,8 @@ func TestClient(t *testing.T) {
 			clusterJoiner: &stubClusterJoiner{joinClusterErr: someErr},
 			nodeLock:      nodelock.New(),
 			disk:          &stubDisk{},
+			wantJoin:      true,
+			wantLock:      true,
 		},
 		"on control plane: node already locked": {
 			role: role.ControlPlane,
@@ -160,6 +175,7 @@ func TestClient(t *testing.T) {
 			clusterJoiner: &stubClusterJoiner{},
 			nodeLock:      lockedLock,
 			disk:          &stubDisk{},
+			wantLock:      true,
 		},
 		"on control plane: disk open fails": {
 			role: role.ControlPlane,
@@ -224,8 +240,16 @@ func TestClient(t *testing.T) {
 
 			client.Stop()
 
-			assert.True(tc.clusterJoiner.joinClusterCalled)
-			assert.False(client.nodeLock.TryLockOnce()) // lock should be locked
+			if tc.wantJoin {
+				assert.True(tc.clusterJoiner.joinClusterCalled)
+			} else {
+				assert.False(tc.clusterJoiner.joinClusterCalled)
+			}
+			if tc.wantLock {
+				assert.False(client.nodeLock.TryLockOnce()) // lock should be locked
+			} else {
+				assert.True(client.nodeLock.TryLockOnce())
+			}
 		})
 	}
 }
@@ -346,7 +370,7 @@ func (s *stubJoinServiceAPI) IssueJoinTicket(_ context.Context, _ *joinproto.Iss
 ) (*joinproto.IssueJoinTicketResponse, error) {
 	answer := <-s.issueJoinTicketAnswerC
 	if answer.resp == nil {
-		answer.resp = &activationproto.IssueJoinTicketResponse{}
+		answer.resp = &joinproto.IssueJoinTicketResponse{}
 	}
 	return answer.resp, answer.err
 }
@@ -88,7 +88,7 @@ func (c *CoreOSConfiguration) InitConfiguration(externalCloudProvider bool) Kube
 				},
 			},
 		},
-		CertSANs: []string{"127.0.0.1", "10.118.0.1"},
+		CertSANs: []string{"127.0.0.1"},
 	},
 	ControllerManager: kubeadm.ControlPlaneComponent{
 		ExtraArgs: map[string]string{
@@ -0,0 +1,179 @@ (new file)
package resources

import (
	"github.com/edgelesssys/constellation/internal/secrets"
	apps "k8s.io/api/apps/v1"
	k8s "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type gcpGuestAgentDaemonset struct {
	DaemonSet apps.DaemonSet
}

func NewGCPGuestAgentDaemonset() *gcpGuestAgentDaemonset {
	return &gcpGuestAgentDaemonset{
		DaemonSet: apps.DaemonSet{
			TypeMeta: meta.TypeMeta{
				APIVersion: "apps/v1",
				Kind:       "DaemonSet",
			},
			ObjectMeta: meta.ObjectMeta{
				Name:      "gcp-guest-agent",
				Namespace: "kube-system",
				Labels: map[string]string{
					"k8s-app":                       "gcp-guest-agent",
					"component":                     "gcp-guest-agent",
					"kubernetes.io/cluster-service": "true",
				},
			},
			Spec: apps.DaemonSetSpec{
				Selector: &meta.LabelSelector{
					MatchLabels: map[string]string{
						"k8s-app": "gcp-guest-agent",
					},
				},
				Template: k8s.PodTemplateSpec{
					ObjectMeta: meta.ObjectMeta{
						Labels: map[string]string{
							"k8s-app": "gcp-guest-agent",
						},
					},
					Spec: k8s.PodSpec{
						PriorityClassName: "system-cluster-critical",
						Tolerations: []k8s.Toleration{
							{
								Key:      "node-role.kubernetes.io/master",
								Operator: k8s.TolerationOpExists,
								Effect:   k8s.TaintEffectNoSchedule,
							},
							{
								Key:      "node-role.kubernetes.io/control-plane",
								Operator: k8s.TolerationOpExists,
								Effect:   k8s.TaintEffectNoSchedule,
							},
						},
						ImagePullSecrets: []k8s.LocalObjectReference{
							{Name: secrets.PullSecretName},
						},
						Containers: []k8s.Container{
							{
								Name:  "gcp-guest-agent",
								Image: gcpGuestImage,
								SecurityContext: &k8s.SecurityContext{
									Privileged: func(b bool) *bool { return &b }(true),
									Capabilities: &k8s.Capabilities{
										Add: []k8s.Capability{"NET_ADMIN"},
									},
								},
								VolumeMounts: []k8s.VolumeMount{
									{Name: "etcssl", ReadOnly: true, MountPath: "/etc/ssl"},
									{Name: "etcpki", ReadOnly: true, MountPath: "/etc/pki"},
									{Name: "bin", ReadOnly: true, MountPath: "/bin"},
									{Name: "usrbin", ReadOnly: true, MountPath: "/usr/bin"},
									{Name: "usr", ReadOnly: true, MountPath: "/usr"},
									{Name: "lib", ReadOnly: true, MountPath: "/lib"},
									{Name: "lib64", ReadOnly: true, MountPath: "/lib64"},
								},
							},
						},
						Volumes: []k8s.Volume{
							{Name: "etcssl", VolumeSource: k8s.VolumeSource{HostPath: &k8s.HostPathVolumeSource{Path: "/etc/ssl"}}},
							{Name: "etcpki", VolumeSource: k8s.VolumeSource{HostPath: &k8s.HostPathVolumeSource{Path: "/etc/pki"}}},
							{Name: "bin", VolumeSource: k8s.VolumeSource{HostPath: &k8s.HostPathVolumeSource{Path: "/bin"}}},
							{Name: "usrbin", VolumeSource: k8s.VolumeSource{HostPath: &k8s.HostPathVolumeSource{Path: "/usr/bin"}}},
							{Name: "usr", VolumeSource: k8s.VolumeSource{HostPath: &k8s.HostPathVolumeSource{Path: "/usr"}}},
							{Name: "lib", VolumeSource: k8s.VolumeSource{HostPath: &k8s.HostPathVolumeSource{Path: "/lib"}}},
							{Name: "lib64", VolumeSource: k8s.VolumeSource{HostPath: &k8s.HostPathVolumeSource{Path: "/lib64"}}},
						},
						HostNetwork: true,
					},
				},
			},
		},
	}
}

// Marshal marshals the gcp-guest-agent daemonset as YAML documents.
func (c *gcpGuestAgentDaemonset) Marshal() ([]byte, error) {
	return MarshalK8SResources(c)
}
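For context, the daemonset defined above is marshaled and applied through the cluster util interface extended later in this commit. A hypothetical wiring sketch (the deployGCPGuestAgent helper below is illustrative only and not part of the commit; clusterUtil and k8sapi.Client are the project's own types):

```go
// deployGCPGuestAgent shows how the pieces added in this commit fit together:
// build the daemonset resource and apply it via SetupGCPGuestAgent, which
// marshals it and runs kubectl.Apply under the hood.
func deployGCPGuestAgent(util clusterUtil, kubectl k8sapi.Client) error {
	if err := util.SetupGCPGuestAgent(kubectl, resources.NewGCPGuestAgentDaemonset()); err != nil {
		return fmt.Errorf("failed to setup gcp guest agent: %w", err)
	}
	return nil
}
```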
@@ -2,10 +2,11 @@ package resources
 
 const (
 	// Constellation images.
-	joinImage          = "ghcr.io/edgelesssys/constellation/join-service:v1.2"
-	accessManagerImage = "ghcr.io/edgelesssys/constellation/access-manager:v1.2"
-	kmsImage           = "ghcr.io/edgelesssys/constellation/kmsserver:v1.2"
-	verificationImage  = "ghcr.io/edgelesssys/constellation/verification-service:v1.2"
+	joinImage          = "ghcr.io/edgelesssys/constellation/join-service:feat-coordinator-selfactivation-node"
+	accessManagerImage = "ghcr.io/edgelesssys/constellation/access-manager:feat-coordinator-selfactivation-node"
+	kmsImage           = "ghcr.io/edgelesssys/constellation/kmsserver:feat-coordinator-selfactivation-node"
+	verificationImage  = "ghcr.io/edgelesssys/constellation/verification-service:feat-coordinator-selfactivation-node"
+	gcpGuestImage      = "ghcr.io/edgelesssys/gcp-guest-agent:latest"
 
 	// external images.
 	clusterAutoscalerImage = "k8s.gcr.io/autoscaling/cluster-autoscaler:v1.23.0"
@@ -22,7 +22,7 @@ type joinServiceDaemonset struct {
 }
 
 // NewJoinServiceDaemonset returns a daemonset for the join service.
-func NewJoinServiceDaemonset(csp, measurementsJSON, idJSON string) *joinServiceDaemonset {
+func NewJoinServiceDaemonset(csp string, measurementsJSON, idJSON string) *joinServiceDaemonset {
 	return &joinServiceDaemonset{
 		ClusterRole: rbac.ClusterRole{
 			TypeMeta: meta.TypeMeta{
@@ -90,13 +90,13 @@ func (k *KubernetesUtil) InitCluster(ctx context.Context, initConfig []byte) err
 	if err != nil {
 		return fmt.Errorf("creating init config file %v: %w", initConfigFile.Name(), err)
 	}
-	defer os.Remove(initConfigFile.Name())
+	// defer os.Remove(initConfigFile.Name())
 
 	if _, err := initConfigFile.Write(initConfig); err != nil {
 		return fmt.Errorf("writing kubeadm init yaml config %v: %w", initConfigFile.Name(), err)
 	}
 
-	cmd := exec.CommandContext(ctx, kubeadmPath, "init", "--config", initConfigFile.Name())
+	cmd := exec.CommandContext(ctx, kubeadmPath, "init", "-v=5", "--config", initConfigFile.Name())
 	_, err = cmd.Output()
 	if err != nil {
 		var exitErr *exec.ExitError
@@ -237,6 +237,11 @@ func (k *KubernetesUtil) SetupJoinService(kubectl Client, joinServiceConfigurati
 	return kubectl.Apply(joinServiceConfiguration, true)
 }
 
+// SetupGCPGuestAgent deploys the GCP guest agent daemon set.
+func (k *KubernetesUtil) SetupGCPGuestAgent(kubectl Client, guestAgentDaemonset resources.Marshaler) error {
+	return kubectl.Apply(guestAgentDaemonset, true)
+}
+
 // SetupCloudControllerManager deploys the k8s cloud-controller-manager.
 func (k *KubernetesUtil) SetupCloudControllerManager(kubectl Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error {
 	if err := kubectl.Apply(configMaps, true); err != nil {
@@ -289,18 +294,18 @@ func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte) err
 	if err != nil {
 		return fmt.Errorf("creating join config file %v: %w", joinConfigFile.Name(), err)
 	}
-	defer os.Remove(joinConfigFile.Name())
+	// defer os.Remove(joinConfigFile.Name())
 
 	if _, err := joinConfigFile.Write(joinConfig); err != nil {
 		return fmt.Errorf("writing kubeadm init yaml config %v: %w", joinConfigFile.Name(), err)
 	}
 
 	// run `kubeadm join` to join a worker node to an existing Kubernetes cluster
-	cmd := exec.CommandContext(ctx, kubeadmPath, "join", "--config", joinConfigFile.Name())
+	cmd := exec.CommandContext(ctx, kubeadmPath, "join", "-v=5", "--config", joinConfigFile.Name())
 	if _, err := cmd.Output(); err != nil {
 		var exitErr *exec.ExitError
 		if errors.As(err, &exitErr) {
-			return fmt.Errorf("kubeadm join failed (code %v) with: %s", exitErr.ExitCode(), exitErr.Stderr)
+			return fmt.Errorf("kubeadm join failed (code %v) with: %s (full err: %s)", exitErr.ExitCode(), exitErr.Stderr, err)
 		}
 		return fmt.Errorf("kubeadm join: %w", err)
 	}
@@ -334,7 +339,7 @@ func (k *KubernetesUtil) GetControlPlaneJoinCertificateKey(ctx context.Context)
 	if err != nil {
 		var exitErr *exec.ExitError
 		if errors.As(err, &exitErr) {
-			return "", fmt.Errorf("kubeadm upload-certs failed (code %v) with: %s", exitErr.ExitCode(), exitErr.Stderr)
+			return "", fmt.Errorf("kubeadm upload-certs failed (code %v) with: %s (full err: %s)", exitErr.ExitCode(), exitErr.Stderr, err)
 		}
 		return "", fmt.Errorf("kubeadm upload-certs: %w", err)
 	}
@@ -21,6 +21,7 @@ type clusterUtil interface {
 	SetupCloudNodeManager(kubectl k8sapi.Client, cloudNodeManagerConfiguration resources.Marshaler) error
 	SetupKMS(kubectl k8sapi.Client, kmsConfiguration resources.Marshaler) error
 	SetupVerificationService(kubectl k8sapi.Client, verificationServiceConfiguration resources.Marshaler) error
+	SetupGCPGuestAgent(kubectl k8sapi.Client, gcpGuestAgentConfiguration resources.Marshaler) error
 	StartKubelet() error
 	RestartKubelet() error
 	GetControlPlaneJoinCertificateKey(ctx context.Context) (string, error)
@@ -3,7 +3,9 @@ package kubernetes
 
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
+	"os/exec"
 	"strings"
 	"time"
 
@@ -90,8 +92,7 @@ func (k *KubeWrapper) InitCluster(
 	var publicIP string
 	var nodePodCIDR string
 	var subnetworkPodCIDR string
-	// this is the IP in "kubeadm init --control-plane-endpoint=<IP/DNS>:<port>" hence the unfortunate name
-	var controlPlaneEndpointIP string
+	var controlPlaneEndpointIP string // this is the IP in "kubeadm init --control-plane-endpoint=<IP/DNS>:<port>" hence the unfortunate name
 	var nodeIP string
 
 	// Step 1: retrieve cloud metadata for Kubernetes configuration
@@ -121,6 +122,11 @@ func (k *KubeWrapper) InitCluster(
 		if err != nil {
 			return nil, fmt.Errorf("retrieving load balancer IP failed: %w", err)
 		}
+		if k.cloudProvider == "gcp" {
+			if err := manuallySetLoadbalancerIP(ctx, controlPlaneEndpointIP); err != nil {
+				return nil, fmt.Errorf("setting load balancer IP failed: %w", err)
+			}
+		}
 	}
@@ -188,6 +194,12 @@ func (k *KubeWrapper) InitCluster(
 		return nil, fmt.Errorf("failed to setup verification service: %w", err)
 	}
 
+	if k.cloudProvider == "gcp" {
+		if err := k.clusterUtil.SetupGCPGuestAgent(k.client, resources.NewGCPGuestAgentDaemonset()); err != nil {
+			return nil, fmt.Errorf("failed to setup gcp guest agent: %w", err)
+		}
+	}
+
 	go k.clusterUtil.FixCilium(nodeName)
 
 	return k.GetKubeconfig()
@@ -247,15 +259,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery,
 
 // GetKubeconfig returns the current node's kubeconfig stored on disk.
 func (k *KubeWrapper) GetKubeconfig() ([]byte, error) {
-	kubeconf, err := k.kubeconfigReader.ReadKubeconfig()
-	if err != nil {
-		return nil, err
-	}
-	// replace the cluster.Server endpoint (127.0.0.1:16443) in admin.conf with the first bootstrapper endpoint (10.118.0.1:6443)
-	// kube-api server listens on 10.118.0.1:6443
-	// 127.0.0.1:16443 is the high availability balancer nginx endpoint, running locally on all nodes
-	// alternatively one could also start a local high availability balancer.
-	return []byte(strings.ReplaceAll(string(kubeconf), "127.0.0.1:16443", "10.118.0.1:6443")), nil
+	return k.kubeconfigReader.ReadKubeconfig()
 }
 
 // GetKubeadmCertificateKey return the key needed to join the Cluster as Control-Plane (has to be executed on a control-plane; errors otherwise).
@@ -335,6 +339,27 @@ func (k *KubeWrapper) setupClusterAutoscaler(instance metadata.InstanceMetadata,
 	return nil
 }
 
+// manuallySetLoadbalancerIP sets the loadbalancer IP of the first control plane during init.
+// The GCP guest agent usually does this, but it is deployed in the cluster, which doesn't exist
+// at this point. This is a workaround to set the loadbalancer IP manually, so kubeadm and kubelet
+// can talk to the local Kubernetes API server using the loadbalancer IP.
+func manuallySetLoadbalancerIP(ctx context.Context, ip string) error {
+	// https://github.com/GoogleCloudPlatform/guest-agent/blob/792fce795218633bcbde505fb3457a0b24f26d37/google_guest_agent/addresses.go#L179
+	if !strings.Contains(ip, "/") {
+		ip = ip + "/32"
+	}
+	args := fmt.Sprintf("route add to local %s scope host dev ens3 proto 66", ip)
+	_, err := exec.CommandContext(ctx, "ip", strings.Split(args, " ")...).Output()
+	if err != nil {
+		var exitErr *exec.ExitError
+		if errors.As(err, &exitErr) {
+			return fmt.Errorf("ip route add (code %v) with: %s", exitErr.ExitCode(), exitErr.Stderr)
+		}
+		return fmt.Errorf("ip route add: %w", err)
+	}
+	return nil
+}
+
 // k8sCompliantHostname transforms a hostname to an RFC 1123 compliant, lowercase subdomain as required by Kubernetes node names.
 // The following regex is used by k8s for validation: /^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$/ .
 // Only a simple heuristic is used for now (to lowercase, replace underscores).
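The "simple heuristic" the comment describes is small enough to sketch. The body below is an assumption for illustration (the commit only shows the doc comment; the repository's implementation may differ):

```go
package kubernetes

import "strings"

// k8sCompliantHostname sketches the heuristic named in the doc comment:
// lowercase the hostname and replace underscores with hyphens so the
// result matches the RFC 1123 subdomain regex used by Kubernetes.
func k8sCompliantHostname(in string) string {
	hostname := strings.ToLower(in)
	hostname = strings.ReplaceAll(hostname, "_", "-")
	return hostname
}
```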
@@ -441,34 +441,6 @@ func TestJoinCluster(t *testing.T) {
 	}
 }
 
-func TestGetKubeconfig(t *testing.T) {
-	testCases := map[string]struct {
-		Kubewrapper KubeWrapper
-		wantErr     bool
-	}{
-		"check single replacement": {
-			Kubewrapper: KubeWrapper{kubeconfigReader: &stubKubeconfigReader{
-				Kubeconfig: []byte("127.0.0.1:16443"),
-			}},
-		},
-		"check multiple replacement": {
-			Kubewrapper: KubeWrapper{kubeconfigReader: &stubKubeconfigReader{
-				Kubeconfig: []byte("127.0.0.1:16443...127.0.0.1:16443"),
-			}},
-		},
-	}
-	for name, tc := range testCases {
-		t.Run(name, func(t *testing.T) {
-			assert := assert.New(t)
-			require := require.New(t)
-			data, err := tc.Kubewrapper.GetKubeconfig()
-			require.NoError(err)
-			assert.NotContains(string(data), "127.0.0.1:16443")
-			assert.Contains(string(data), "10.118.0.1:6443")
-		})
-	}
-}
-
 func TestK8sCompliantHostname(t *testing.T) {
 	compliantHostname := regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
 	testCases := map[string]struct {
@@ -512,6 +484,7 @@ type stubClusterUtil struct {
 	setupKMSError               error
 	setupAccessManagerError     error
 	setupVerificationServiceErr error
+	setupGCPGuestAgentErr       error
 	joinClusterErr              error
 	startKubeletErr             error
 	restartKubeletErr           error
@@ -543,6 +516,10 @@ func (s *stubClusterUtil) SetupJoinService(kubectl k8sapi.Client, joinServiceConfiguration resources.Marshaler) error {
 	return s.setupJoinServiceError
 }
 
+func (s *stubClusterUtil) SetupGCPGuestAgent(kubectl k8sapi.Client, gcpGuestAgentConfiguration resources.Marshaler) error {
+	return s.setupGCPGuestAgentErr
+}
+
 func (s *stubClusterUtil) SetupCloudControllerManager(kubectl k8sapi.Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error {
 	return s.setupCloudControllerManagerError
 }
@@ -128,7 +128,7 @@ func initialize(cmd *cobra.Command, dialer grpcDialer, serviceAccCreator service
 		return err
 	}
 
-	if err := writeOutput(resp, cmd.OutOrStdout(), fileHandler); err != nil {
+	if err := writeOutput(resp, controlPlanes.PublicIPs()[0], cmd.OutOrStdout(), fileHandler); err != nil {
 		return err
 	}
 
@@ -160,21 +160,25 @@ func (d *initDoer) Do(ctx context.Context) error {
 	if err != nil {
 		return fmt.Errorf("dialing init server: %w", err)
 	}
 	defer conn.Close()
 	protoClient := initproto.NewAPIClient(conn)
 	resp, err := protoClient.Init(ctx, d.req)
 	if err != nil {
-		return fmt.Errorf("marshalling VPN config: %w", err)
+		return fmt.Errorf("init call: %w", err)
 	}
 	d.resp = resp
 	return nil
 }
 
-func writeOutput(resp *initproto.InitResponse, wr io.Writer, fileHandler file.Handler) error {
+func writeOutput(resp *initproto.InitResponse, ip string, wr io.Writer, fileHandler file.Handler) error {
 	fmt.Fprint(wr, "Your Constellation cluster was successfully initialized.\n\n")
 
+	ownerID := base64.StdEncoding.EncodeToString(resp.OwnerId)
+	clusterID := base64.StdEncoding.EncodeToString(resp.ClusterId)
+
 	tw := tabwriter.NewWriter(wr, 0, 0, 2, ' ', 0)
-	writeRow(tw, "Constellation cluster's owner identifier", string(resp.OwnerId))
-	writeRow(tw, "Constellation cluster's unique identifier", string(resp.ClusterId))
+	writeRow(tw, "Constellation cluster's owner identifier", ownerID)
+	writeRow(tw, "Constellation cluster's unique identifier", clusterID)
 	writeRow(tw, "Kubernetes configuration", constants.AdminConfFilename)
 	tw.Flush()
 	fmt.Fprintln(wr)
@@ -183,7 +187,11 @@ func writeOutput(resp *initproto.InitResponse, wr io.Writer, fileHandler file.Ha
 		return fmt.Errorf("write kubeconfig: %w", err)
 	}
 
-	idFile := clusterIDsFile{ClusterID: r.clusterID, OwnerID: r.ownerID, Endpoint: r.coordinatorPubIP}
+	idFile := clusterIDsFile{
+		ClusterID: clusterID,
+		OwnerID:   ownerID,
+		Endpoint:  net.JoinHostPort(ip, strconv.Itoa(constants.VerifyServiceNodePortGRPC)),
+	}
 	if err := fileHandler.WriteJSON(constants.ClusterIDsFileName, idFile, file.OptNone); err != nil {
 		return fmt.Errorf("writing Constellation id file: %w", err)
 	}
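For orientation, the id-file values above are plain standard-library derivations; a self-contained sketch (the IP and port are placeholders, the byte strings match the ones used in the tests below):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net"
	"strconv"
)

func main() {
	// The CLI base64-encodes the raw owner/cluster IDs before printing
	// and persisting them, and joins the control-plane IP with the
	// verify-service port to form the endpoint.
	ownerID := base64.StdEncoding.EncodeToString([]byte("ownerID"))
	clusterID := base64.StdEncoding.EncodeToString([]byte("clusterID"))
	endpoint := net.JoinHostPort("203.0.113.10", strconv.Itoa(30081)) // placeholder IP and port

	fmt.Println(ownerID)   // b3duZXJJRA==
	fmt.Println(clusterID) // Y2x1c3RlcklE
	fmt.Println(endpoint)  // 203.0.113.10:30081
}
```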
@@ -5,6 +5,7 @@ import (
 	"context"
 	"encoding/base64"
 	"encoding/json"
 	"errors"
+	"net"
 	"strconv"
 	"strings"
 
@@ -72,7 +73,7 @@ func TestInitialize(t *testing.T) {
 		OwnerId:   []byte("ownerID"),
 		ClusterId: []byte("clusterID"),
 	}
-	// someErr := errors.New("failed")
+	someErr := errors.New("failed")
 
 	testCases := map[string]struct {
 		existingState state.ConstellationState
@@ -103,37 +104,22 @@ func TestInitialize(t *testing.T) {
 			initServerAPI:    &stubInitServer{initResp: testInitResp},
 			setAutoscaleFlag: true,
 		},
-		// "no state exists": {
-		// 	existingState: state.ConstellationState{},
-		// 	initServerAPI: &stubInitServer{},
-		// 	wantErr:       true,
-		// },
-		// "no instances to pick one": {
-		// 	existingState: state.ConstellationState{GCPNodes: cloudtypes.Instances{}},
-		// 	initServerAPI: &stubInitServer{},
-		// 	wantErr:       true,
-		// },
-		// "fail Connect": {
-		// 	existingState: testGcpState,
-		// 	initServerAPI: &stubInitServer{},
-		// 	wantErr:       true,
-		// },
-		// "fail Activate": {
-		// 	existingState: testGcpState,
-		// 	initServerAPI: &stubInitServer{},
-		// 	wantErr:       true,
-		// },
-		// "fail to wait for required status": {
-		// 	existingState: testGcpState,
-		// 	initServerAPI: &stubInitServer{},
-		// 	wantErr:       true,
-		// },
-		// "fail to create service account": {
-		// 	existingState:         testGcpState,
-		// 	initServerAPI:         &stubInitServer{},
-		// 	serviceAccountCreator: stubServiceAccountCreator{createErr: someErr},
-		// 	wantErr:               true,
-		// },
+		"empty state": {
+			existingState: state.ConstellationState{},
+			initServerAPI: &stubInitServer{},
+			wantErr:       true,
+		},
+		"init call fails": {
+			existingState: testGcpState,
+			initServerAPI: &stubInitServer{initErr: someErr},
+			wantErr:       true,
+		},
+		"fail to create service account": {
+			existingState:         testGcpState,
+			initServerAPI:         &stubInitServer{},
+			serviceAccountCreator: stubServiceAccountCreator{createErr: someErr},
+			wantErr:               true,
+		},
 	}
 
 	for name, tc := range testCases {
@@ -174,8 +160,8 @@ func TestInitialize(t *testing.T) {
 				return
 			}
 			require.NoError(err)
-			assert.Contains(out.String(), "ownerID")
-			assert.Contains(out.String(), "clusterID")
+			assert.Contains(out.String(), base64.StdEncoding.EncodeToString([]byte("ownerID")))
+			assert.Contains(out.String(), base64.StdEncoding.EncodeToString([]byte("clusterID")))
 			if tc.setAutoscaleFlag {
 				assert.Len(tc.initServerAPI.activateAutoscalingNodeGroups, 1)
 			} else {
@@ -194,26 +180,29 @@ func TestWriteOutput(t *testing.T) {
 		Kubeconfig: []byte("kubeconfig"),
 	}
 
+	ownerID := base64.StdEncoding.EncodeToString(resp.OwnerId)
+	clusterID := base64.StdEncoding.EncodeToString(resp.ClusterId)
+
 	expectedIdFile := clusterIDsFile{
-		ClusterID: string(resp.ClusterId),
-		OwnerID:   string(resp.OwnerId),
+		ClusterID: clusterID,
+		OwnerID:   ownerID,
 		Endpoint:  net.JoinHostPort("ip", strconv.Itoa(constants.VerifyServiceNodePortGRPC)),
 	}
 
 	var out bytes.Buffer
 	testFs := afero.NewMemMapFs()
 	fileHandler := file.NewHandler(testFs)
 
-	err := writeOutput(resp, &out, fileHandler)
+	err := writeOutput(resp, "ip", &out, fileHandler)
 	assert.NoError(err)
-	assert.Contains(out.String(), string(resp.OwnerId))
-	assert.Contains(out.String(), string(resp.ClusterId))
+	assert.Contains(out.String(), ownerID)
+	assert.Contains(out.String(), clusterID)
 	assert.Contains(out.String(), constants.AdminConfFilename)
-	assert.Equal(resp.Kubeconfig, string(resp.Kubeconfig))
 
 	afs := afero.Afero{Fs: testFs}
 	adminConf, err := afs.ReadFile(constants.AdminConfFilename)
 	assert.NoError(err)
-	assert.Equal(resp.Kubeconfig, string(adminConf))
+	assert.Equal(string(resp.Kubeconfig), string(adminConf))
 
 	idsFile, err := afs.ReadFile(constants.ClusterIDsFileName)
 	assert.NoError(err)
@@ -140,7 +140,7 @@ func parseVerifyFlags(cmd *cobra.Command, fileHandler file.Handler) (verifyFlags
 	if ownerID == "" && clusterID == "" {
 		return verifyFlags{}, errors.New("neither owner-id nor unique-id provided to verify the cluster")
 	}
-	endpoint, err = validateEndpoint(endpoint, constants.CoordinatorPort)
+	endpoint, err = validateEndpoint(endpoint, constants.BootstrapperPort)
 	if err != nil {
 		return verifyFlags{}, fmt.Errorf("validating endpoint argument: %w", err)
 	}
go.mod (2 changes)
@@ -76,9 +76,7 @@ require (
 	github.com/googleapis/gax-go/v2 v2.2.0
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
 	github.com/hashicorp/go-multierror v1.1.1
-	github.com/kr/text v0.2.0
-	github.com/martinjungblut/go-cryptsetup v0.0.0-20220520180014-fd0874fd07a6
+	github.com/martinjungblut/go-cryptsetup v0.0.0-20220421194528-92e17766b2e7
 	github.com/microsoft/ApplicationInsights-Go v0.4.4
 	github.com/schollz/progressbar/v3 v3.8.6
 	github.com/spf13/afero v1.8.2
go.sum (2 changes)
@@ -514,8 +514,6 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S
 github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
 github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
 github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
-github.com/daniel-weisse/go-cryptsetup v0.0.0-20220511084044-b537356aa24b h1:YGmoG92uSwGnXqoYYIAAXZSJ33Ogc13GXgnBbDr8p5o=
-github.com/daniel-weisse/go-cryptsetup v0.0.0-20220511084044-b537356aa24b/go.mod h1:gZoZ0+POlM1ge/VUxWpMmZVNPzzMJ7l436CgkQ5+qzU=
 github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -9,9 +9,9 @@ import (
 	"net/http"
 	"strings"
 
-	"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
-	"github.com/edgelesssys/constellation/coordinator/role"
+	"github.com/edgelesssys/constellation/bootstrapper/role"
 	"github.com/edgelesssys/constellation/hack/qemu-metadata-api/virtwrapper"
+	"github.com/edgelesssys/constellation/internal/cloud/metadata"
 	"github.com/edgelesssys/constellation/internal/file"
 	"github.com/edgelesssys/constellation/internal/logger"
 	"go.uber.org/zap"
@@ -203,7 +203,7 @@ func (s *Server) exportPCRs(w http.ResponseWriter, r *http.Request) {
 }
 
 // listAll returns a list of all active peers.
-func (s *Server) listAll() ([]cloudtypes.Instance, error) {
+func (s *Server) listAll() ([]metadata.InstanceMetadata, error) {
 	net, err := s.virt.LookupNetworkByName("constellation")
 	if err != nil {
 		return nil, err
@@ -214,15 +214,15 @@ func (s *Server) listAll() ([]metadata.InstanceMetadata, error) {
 	if err != nil {
 		return nil, err
 	}
-	var peers []cloudtypes.Instance
+	var peers []metadata.InstanceMetadata
 
 	for _, lease := range leases {
-		instanceRole := role.Node
+		instanceRole := role.Worker
 		if strings.HasPrefix(lease.Hostname, "control-plane") {
-			instanceRole = role.Coordinator
+			instanceRole = role.ControlPlane
 		}
 
-		peers = append(peers, cloudtypes.Instance{
+		peers = append(peers, metadata.InstanceMetadata{
 			Name:       lease.Hostname,
 			Role:       instanceRole,
 			PrivateIPs: []string{lease.IPaddr},
@@ -9,8 +9,8 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
 	"github.com/edgelesssys/constellation/hack/qemu-metadata-api/virtwrapper"
+	"github.com/edgelesssys/constellation/internal/cloud/metadata"
 	"github.com/edgelesssys/constellation/internal/file"
 	"github.com/edgelesssys/constellation/internal/logger"
 	"github.com/spf13/afero"
@@ -160,7 +160,7 @@ func TestListSelf(t *testing.T) {
 	metadataRaw, err := io.ReadAll(w.Body)
 	require.NoError(err)
 
-	var metadata cloudtypes.Instance
+	var metadata metadata.InstanceMetadata
 	require.NoError(json.Unmarshal(metadataRaw, &metadata))
 	assert.Equal(tc.connect.network.leases[0].Hostname, metadata.Name)
 	assert.Equal(tc.connect.network.leases[0].IPaddr, metadata.PublicIPs[0])
@@ -222,7 +222,7 @@ func TestListPeers(t *testing.T) {
 	metadataRaw, err := io.ReadAll(w.Body)
 	require.NoError(err)
 
-	var metadata []cloudtypes.Instance
+	var metadata []metadata.InstanceMetadata
 	require.NoError(json.Unmarshal(metadataRaw, &metadata))
 	assert.Len(metadata, len(tc.connect.network.leases))
 })
@@ -1,11 +1,18 @@
 package main
 
 import (
+	"context"
+	"errors"
 	"flag"
+	"net"
 	"path/filepath"
+	"strconv"
 
+	azurecloud "github.com/edgelesssys/constellation/bootstrapper/cloudprovider/azure"
+	gcpcloud "github.com/edgelesssys/constellation/bootstrapper/cloudprovider/gcp"
+	qemucloud "github.com/edgelesssys/constellation/bootstrapper/cloudprovider/qemu"
 	"github.com/edgelesssys/constellation/internal/atls"
+	"github.com/edgelesssys/constellation/internal/cloud/cloudprovider"
 	"github.com/edgelesssys/constellation/internal/constants"
 	"github.com/edgelesssys/constellation/internal/file"
 	"github.com/edgelesssys/constellation/internal/grpc/atlscredentials"
@@ -23,8 +30,11 @@ func main() {
 	provider := flag.String("cloud-provider", "", "cloud service provider this binary is running on")
 	kmsEndpoint := flag.String("kms-endpoint", "", "endpoint of Constellations key management service")
 	verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription)
 
 	flag.Parse()
 
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity))
 
 	log.With(zap.String("version", constants.VersionInfo), zap.String("cloudProvider", *provider)).
@@ -40,7 +50,12 @@ func main() {
 
 	creds := atlscredentials.New(nil, []atls.Validator{validator})
 
-	kubeadm, err := kubeadm.New(log.Named("kubeadm"))
+	vpcIP, err := getIPinVPC(ctx, *provider)
+	if err != nil {
+		log.With(zap.Error(err)).Fatalf("Failed to get IP in VPC")
+	}
+	apiServerEndpoint := net.JoinHostPort(vpcIP, strconv.Itoa(constants.KubernetesPort))
+	kubeadm, err := kubeadm.New(apiServerEndpoint, log.Named("kubeadm"))
 	if err != nil {
 		log.With(zap.Error(err)).Fatalf("Failed to create kubeadm")
 	}
@@ -71,3 +86,41 @@ func main() {
 		log.With(zap.Error(err)).Fatalf("Failed to run server")
 	}
 }
+
+func getIPinVPC(ctx context.Context, provider string) (string, error) {
+	switch cloudprovider.FromString(provider) {
+	case cloudprovider.Azure:
+		metadata, err := azurecloud.NewMetadata(ctx)
+		if err != nil {
+			return "", err
+		}
+		self, err := metadata.Self(ctx)
+		if err != nil {
+			return "", err
+		}
+		return self.PrivateIPs[0], nil
+	case cloudprovider.GCP:
+		gcpClient, err := gcpcloud.NewClient(ctx)
+		if err != nil {
+			return "", err
+		}
+		metadata := gcpcloud.New(gcpClient)
+		if err != nil {
+			return "", err
+		}
+		self, err := metadata.Self(ctx)
+		if err != nil {
+			return "", err
+		}
+		return self.PrivateIPs[0], nil
+	case cloudprovider.QEMU:
+		metadata := &qemucloud.Metadata{}
+		self, err := metadata.Self(ctx)
+		if err != nil {
+			return "", err
+		}
+		return self.PrivateIPs[0], nil
+	default:
+		return "", errors.New("unsupported cloud provider")
+	}
+}
@@ -26,13 +26,14 @@ import (
 
 // Kubeadm manages joining of new nodes.
 type Kubeadm struct {
-	log    *logger.Logger
-	client clientset.Interface
-	file   file.Handler
+	apiServerEndpoint string
+	log               *logger.Logger
+	client            clientset.Interface
+	file              file.Handler
 }
 
 // New creates a new Kubeadm instance.
-func New(log *logger.Logger) (*Kubeadm, error) {
+func New(apiServerEndpoint string, log *logger.Logger) (*Kubeadm, error) {
 	config, err := rest.InClusterConfig()
 	if err != nil {
 		return nil, fmt.Errorf("failed to get in-cluster config: %w", err)
@@ -44,9 +45,10 @@ func New(apiServerEndpoint string, log *logger.Logger) (*Kubeadm, error) {
 	file := file.NewHandler(afero.NewOsFs())
 
 	return &Kubeadm{
-		log:    log,
-		client: client,
-		file:   file,
+		apiServerEndpoint: apiServerEndpoint,
+		log:               log,
+		client:            client,
+		file:              file,
 	}, nil
 }
 
@@ -65,6 +67,8 @@ func (k *Kubeadm) GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
 		Token:       tokenStr,
 		Description: "Bootstrap token generated by Constellation's Join service",
 		TTL:         &metav1.Duration{Duration: ttl},
+		Usages:      []string{"signing", "authentication"},
+		Groups:      []string{"system:bootstrappers:kubeadm:default-node-token"},
 	}
 
 	// create the token in Kubernetes
@@ -99,7 +103,7 @@ func (k *Kubeadm) GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
 	k.log.Infof("Join token creation successful")
 	return &kubeadm.BootstrapTokenDiscovery{
 		Token:             tokenStr.String(),
-		APIServerEndpoint: "10.118.0.1:6443", // This is not HA and should be replaced with the IP of the node issuing the token
+		APIServerEndpoint: k.apiServerEndpoint,
 		CACertHashes:      publicKeyPins,
 	}, nil
}
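The discovery data returned by GetJoinToken is what ultimately parameterizes `kubeadm join` on the joining node. A hypothetical sketch of that mapping, using a local struct that mirrors the fields of the kubeadm type shown above (all values are placeholders):

```go
package main

import "fmt"

// bootstrapTokenDiscovery mirrors the fields of the kubeadm
// BootstrapTokenDiscovery type used in the hunk above.
type bootstrapTokenDiscovery struct {
	Token             string
	APIServerEndpoint string
	CACertHashes      []string
}

func main() {
	btd := bootstrapTokenDiscovery{
		Token:             "abcdef.0123456789abcdef", // placeholder token
		APIServerEndpoint: "10.118.0.1:6443",         // endpoint from the issuing service
		CACertHashes:      []string{"sha256:..."},    // placeholder CA cert pin
	}
	// Equivalent command line a node would run to join the cluster:
	fmt.Printf("kubeadm join %s --token %s --discovery-token-ca-cert-hash %s\n",
		btd.APIServerEndpoint, btd.Token, btd.CACertHashes[0])
}
```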
@@ -9,7 +9,7 @@ import (
 	"google.golang.org/grpc/credentials/insecure"
 )
 
-// ConstellationKMS is a key service using the Constellation Coordinator to fetch volume keys.
+// ConstellationKMS is a key service to fetch volume keys.
 type ConstellationKMS struct {
 	endpoint string
 	kms      kmsClient
@@ -23,7 +23,7 @@ func NewConstellationKMS(coordinatorEndpoint string) *ConstellationKMS {
 	}
 }
 
-// GetDEK connects to the Constellation Coordinators VPN API to request a data encryption key derived from the Constellation's master secret.
+// GetDEK requests a data encryption key derived from the Constellation's master secret.
 func (k *ConstellationKMS) GetDEK(ctx context.Context, dekID string, dekSize int) ([]byte, error) {
 	conn, err := grpc.DialContext(ctx, k.endpoint, grpc.WithTransportCredentials(insecure.NewCredentials()))
 	if err != nil {
@@ -1,21 +0,0 @@ (file deleted)
-package main
-
-import (
-	"context"
-
-	"github.com/edgelesssys/constellation/bootstrapper/role"
-	"github.com/edgelesssys/constellation/internal/cloud/metadata"
-)
-
-type fakeMetadataAPI struct{}
-
-func (f *fakeMetadataAPI) List(ctx context.Context) ([]metadata.InstanceMetadata, error) {
-	return []metadata.InstanceMetadata{
-		{
-			Name:       "instanceName",
-			ProviderID: "fake://instance-id",
-			Role:       role.Unknown,
-			PrivateIPs: []string{"192.0.2.1"},
-		},
-	}, nil
-}