AB#2111 Deploy activation service on cluster init (#205)
* Deploy activation service on cluster init
* Use base image with CA certificates for activation service
* Improve KMS server

Signed-off-by: Daniel Weiße <dw@edgeless.systems>
This commit is contained in:
parent 84ca9e3070
commit 4842d29aff
@@ -10,7 +10,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

- Added `constellation-access-manager`, allowing users to manage SSH users over a ConfigMap. This allows persistent & dynamic management of SSH users on multiple nodes, even after a reboot.

### Changed

- Moved KMS image build instructions to `Dockerfile.services` to have a centralized Dockerfile for all in-repo microservices.

### Removed

@@ -20,6 +19,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

- GCP WireGuard encryption via cilium

### Internal

- Added `constellation-activation-service`, offloading new Kubernetes node activation from monolithic Coordinator to Kubernetes native micro-service

## [1.2.0] - 2022-06-02

### Added
@@ -22,7 +22,8 @@ RUN rm -rf ./hack/

# Build
RUN mkdir -p /constellation/build
WORKDIR /constellation/kms/server/cmd
RUN CGO_ENABLED=0 go build -o /constellation/build/kmsserver
ARG PROJECT_VERSION=0.0.0
RUN CGO_ENABLED=0 go build -o /constellation/build/kmsserver -trimpath -buildvcs=false -ldflags "-s -w -buildid='' -X github.com/edgelesssys/constellation/internal/constants.VersionInfo=${PROJECT_VERSION}"

FROM scratch as release
COPY --from=build /constellation/build/kmsserver /kmsserver
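The `-X` linker flag in the build step above stamps a version string into the binary at link time. As a minimal sketch of the variable it targets (assuming the `constants` package exposes it as a plain string, which is what the flag's import path suggests):

```go
// Package constants holds values shared across Constellation services.
package constants

// VersionInfo is printed in the services' start-up log line. It defaults to
// "0.0.0" and is overwritten at build time, for example:
//
//	go build -ldflags "-X github.com/edgelesssys/constellation/internal/constants.VersionInfo=1.2.3"
var VersionInfo = "0.0.0"
```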
@@ -22,9 +22,10 @@ COPY . /constellation
RUN rm -rf ./hack/

WORKDIR /constellation/activation
ARG PROJECT_VERSION=v0.0.0
RUN CGO_ENABLED=0 go build -o activation-service -trimpath -buildvcs=false -ldflags "-s -w -buildid='' -X main.versionInfo=${PROJECT_VERSION}" ./cmd/
ARG PROJECT_VERSION=0.0.0
RUN CGO_ENABLED=0 go build -o activation-service -trimpath -buildvcs=false -ldflags "-s -w -buildid='' -X github.com/edgelesssys/constellation/internal/constants.VersionInfo=${PROJECT_VERSION}" ./cmd/

FROM scratch as release
# We would like to use a scratch image here, but we require CA certificates to be installed for some operations.
FROM fedora@sha256:36af84ba69e21c9ef86a0424a090674c433b2b80c2462e57503886f1d823abe8 as release
COPY --from=build /constellation/activation/activation-service /activation
ENTRYPOINT [ "/activation" ]
@@ -2,6 +2,8 @@ package main

import (
	"flag"
	"path/filepath"
	"strconv"

	"github.com/edgelesssys/constellation/activation/kms"
	"github.com/edgelesssys/constellation/activation/kubeadm"
@@ -17,17 +19,15 @@ import (
	"k8s.io/klog/v2"
)

const (
	bindPort = "9090"
)

func main() {
	provider := flag.String("cloud-provider", "", "cloud service provider this binary is running on")
	kmsEndpoint := flag.String("kms-endpoint", "", "endpoint of Constellation's key management service")

	klog.InitFlags(nil)
	flag.Parse()
	klog.V(2).Infof("\nConstellation Node Activation Service\nVersion: v%s\nRunning on: %s", constants.VersionInfo, *provider)
	defer klog.Flush()

	klog.V(2).Infof("\nConstellation Node Activation Service\nVersion: %s\nRunning on: %s", constants.VersionInfo, *provider)

	handler := file.NewHandler(afero.NewOsFs())

@@ -54,13 +54,13 @@ func main() {
	defer watcher.Close()

	go func() {
		klog.V(4).Infof("starting file watcher for measurements file %s", constants.ActivationMeasurementsFilename)
		if err := watcher.Watch(constants.ActivationMeasurementsFilename); err != nil {
		klog.V(4).Infof("starting file watcher for measurements file %s", filepath.Join(constants.ActivationBasePath, constants.ActivationMeasurementsFilename))
		if err := watcher.Watch(filepath.Join(constants.ActivationBasePath, constants.ActivationMeasurementsFilename)); err != nil {
			klog.Exitf("failed to watch measurements file: %s", err)
		}
	}()

	if err := server.Run(creds, bindPort); err != nil {
	if err := server.Run(creds, strconv.Itoa(constants.ActivationServicePort)); err != nil {
		klog.Exitf("failed to run server: %s", err)
	}
}
@@ -7,6 +7,7 @@ import (
	"github.com/edgelesssys/constellation/kms/server/kmsapi/kmsproto"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"k8s.io/klog/v2"
)

// Client interacts with Constellation's key management service.
@@ -33,6 +34,7 @@ func (c Client) GetDataKey(ctx context.Context, uuid string, length int) ([]byte
	}
	defer conn.Close()

	klog.V(6).Infof("GetDataKey: connecting to KMS at %s", c.endpoint)
	res, err := c.grpc.GetDataKey(
		ctx,
		&kmsproto.GetDataKeyRequest{
@@ -4,11 +4,14 @@ import (
	"context"
	"fmt"
	"net"
	"path/filepath"
	"time"

	proto "github.com/edgelesssys/constellation/activation/activationproto"
	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
	"github.com/edgelesssys/constellation/internal/constants"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/edgelesssys/constellation/internal/grpc/grpc_klog"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
@@ -41,7 +44,7 @@ func New(fileHandler file.Handler, ca certificateAuthority, joinTokenGetter join
func (s *Server) Run(creds credentials.TransportCredentials, port string) error {
	grpcServer := grpc.NewServer(
		grpc.Creds(creds),
		grpc.UnaryInterceptor(logGRPC),
		grpc.UnaryInterceptor(grpc_klog.LogGRPC(2)),
	)

	proto.RegisterAPIServer(grpcServer, s)
@@ -61,8 +64,8 @@ func (s *Server) Run(creds credentials.TransportCredentials, port string) error
// - cluster and owner ID to taint the node as initialized.
func (s *Server) ActivateNode(ctx context.Context, req *proto.ActivateNodeRequest) (*proto.ActivateNodeResponse, error) {
	klog.V(4).Info("ActivateNode: loading IDs")
	var id id
	if err := s.file.ReadJSON(constants.ActivationIDFilename, &id); err != nil {
	var id attestationtypes.ID
	if err := s.file.ReadJSON(filepath.Join(constants.ActivationBasePath, constants.ActivationIDFilename), &id); err != nil {
		klog.Errorf("unable to load IDs: %s", err)
		return nil, status.Errorf(codes.Internal, "unable to load IDs: %s", err)
	}
@@ -122,21 +125,3 @@ type certificateAuthority interface {
	// GetCertificate returns a certificate and private key, signed by the issuer.
	GetCertificate(nodeName string) (kubeletCert []byte, kubeletKey []byte, err error)
}

type id struct {
	Cluster []byte `json:"cluster"`
	Owner []byte `json:"owner"`
}

// logGRPC writes a log with the name of every gRPC call or error it receives.
func logGRPC(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	// log the requests method name
	klog.V(2).Infof("GRPC call: %s", info.FullMethod)

	// log errors, if any
	resp, err := handler(ctx, req)
	if err != nil {
		klog.Errorf("GRPC error: %v", err)
	}
	return resp, err
}
@@ -4,10 +4,12 @@ import (
	"context"
	"encoding/json"
	"errors"
	"path/filepath"
	"testing"
	"time"

	proto "github.com/edgelesssys/constellation/activation/activationproto"
	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
	"github.com/edgelesssys/constellation/internal/constants"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/spf13/afero"
@@ -20,7 +22,7 @@ func TestActivateNode(t *testing.T) {
	someErr := errors.New("error")
	testKey := []byte{0x1, 0x2, 0x3}
	testCert := []byte{0x4, 0x5, 0x6}
	testID := id{
	testID := attestationtypes.ID{
		Owner: []byte{0x4, 0x5, 0x6},
		Cluster: []byte{0x7, 0x8, 0x9},
	}
@@ -127,7 +129,7 @@ func TestActivateNode(t *testing.T) {

	file := file.NewHandler(afero.NewMemMapFs())
	if len(tc.id) > 0 {
		require.NoError(file.Write(constants.ActivationIDFilename, tc.id, 0o644))
		require.NoError(file.Write(filepath.Join(constants.ActivationBasePath, constants.ActivationIDFilename), tc.id, 0o644))
	}
	api := New(file, tc.ca, tc.kubeadm, tc.kms)

@@ -137,7 +139,7 @@ func TestActivateNode(t *testing.T) {
		return
	}

	var expectedIDs id
	var expectedIDs attestationtypes.ID
	require.NoError(json.Unmarshal(tc.id, &expectedIDs))

	require.NoError(err)
@@ -153,7 +155,7 @@ func TestActivateNode(t *testing.T) {
	}
}

func mustMarshalID(id id) []byte {
func mustMarshalID(id attestationtypes.ID) []byte {
	b, err := json.Marshal(id)
	if err != nil {
		panic(err)
@@ -3,6 +3,7 @@ package validator

import (
	"encoding/asn1"
	"fmt"
	"path/filepath"
	"sync"

	"github.com/edgelesssys/constellation/internal/atls"
@@ -68,7 +69,7 @@ func (u *Updatable) Update() error {
	klog.V(4).Info("Updating expected measurements")

	var measurements map[uint32][]byte
	if err := u.fileHandler.ReadJSON(constants.ActivationMeasurementsFilename, &measurements); err != nil {
	if err := u.fileHandler.ReadJSON(filepath.Join(constants.ActivationBasePath, constants.ActivationMeasurementsFilename), &measurements); err != nil {
		return err
	}
	klog.V(6).Infof("New measurements: %v", measurements)
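Since `Update` decodes the measurements file into a `map[uint32][]byte`, the ConfigMap entry it reads is plain JSON with string keys and base64-encoded values. A small illustrative sketch of that encoding (the PCR index and value are placeholders):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// encoding/json renders uint32 map keys as strings and []byte values as base64,
	// so a 32-byte zero measurement for PCR 11 serializes to {"11":"AAAA...="}.
	measurements := map[uint32][]byte{11: make([]byte, 32)}
	b, _ := json.Marshal(measurements)
	fmt.Println(string(b))
}
```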
@@ -9,6 +9,7 @@ import (
	"io"
	"net/http"
	"net/http/httptest"
	"path/filepath"
	"sync"
	"testing"

@@ -58,7 +59,7 @@ func TestNewUpdateableValidator(t *testing.T) {
	handler := file.NewHandler(afero.NewMemMapFs())
	if tc.writeFile {
		require.NoError(handler.WriteJSON(
			constants.ActivationMeasurementsFilename,
			filepath.Join(constants.ActivationBasePath, constants.ActivationMeasurementsFilename),
			map[uint32][]byte{
				11: {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
			},
@@ -94,7 +95,7 @@ func TestUpdate(t *testing.T) {

	// write measurement config
	require.NoError(handler.WriteJSON(
		constants.ActivationMeasurementsFilename,
		filepath.Join(constants.ActivationBasePath, constants.ActivationMeasurementsFilename),
		map[uint32][]byte{
			11: {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
		},
@@ -144,7 +145,7 @@ func TestUpdateConcurrency(t *testing.T) {
		},
	}
	require.NoError(handler.WriteJSON(
		constants.ActivationMeasurementsFilename,
		filepath.Join(constants.ActivationBasePath, constants.ActivationMeasurementsFilename),
		map[uint32][]byte{
			11: {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
		},
@@ -2,6 +2,7 @@ package main

import (
	"context"
	"encoding/json"
	"flag"
	"io"
	"log"
@@ -101,7 +102,14 @@ func main() {
			log.Fatal(err)
		}
		coreMetadata = metadata
		kube = kubernetes.New("gcp", k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New(), &gcpcloud.CloudControllerManager{}, &gcpcloud.CloudNodeManager{}, &gcpcloud.Autoscaler{}, metadata)
		pcrsJSON, err := json.Marshal(pcrs)
		if err != nil {
			log.Fatal(err)
		}
		kube = kubernetes.New(
			"gcp", k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New(), &gcpcloud.CloudControllerManager{},
			&gcpcloud.CloudNodeManager{}, &gcpcloud.Autoscaler{}, metadata, pcrsJSON,
		)
		encryptedDisk = diskencryption.New()
		bindIP = defaultIP
		bindPort = defaultPort
@@ -127,7 +135,14 @@ func main() {
			log.Fatal(err)
		}
		coreMetadata = metadata
		kube = kubernetes.New("azure", k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New(), azurecloud.NewCloudControllerManager(metadata), &azurecloud.CloudNodeManager{}, &azurecloud.Autoscaler{}, metadata)
		pcrsJSON, err := json.Marshal(pcrs)
		if err != nil {
			log.Fatal(err)
		}
		kube = kubernetes.New(
			"azure", k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New(), azurecloud.NewCloudControllerManager(metadata),
			&azurecloud.CloudNodeManager{}, &azurecloud.Autoscaler{}, metadata, pcrsJSON,
		)

		encryptedDisk = diskencryption.New()
		bindIP = defaultIP
@@ -148,7 +163,14 @@ func main() {
		// no support for cloud services in qemu
		metadata := &qemucloud.Metadata{}
		cloudLogger = &logging.NopLogger{}
		kube = kubernetes.New("qemu", k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New(), &qemucloud.CloudControllerManager{}, &qemucloud.CloudNodeManager{}, &qemucloud.Autoscaler{}, metadata)
		pcrsJSON, err := json.Marshal(pcrs)
		if err != nil {
			log.Fatal(err)
		}
		kube = kubernetes.New(
			"qemu", k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New(), &qemucloud.CloudControllerManager{},
			&qemucloud.CloudNodeManager{}, &qemucloud.Autoscaler{}, metadata, pcrsJSON,
		)
		coreMetadata = metadata

		encryptedDisk = diskencryption.New()
@@ -6,6 +6,7 @@ import (

	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
	"github.com/edgelesssys/constellation/internal/constants"
	"go.uber.org/zap"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
@@ -22,7 +23,9 @@ func (c *Core) GetK8SCertificateKey(ctx context.Context) (string, error) {
}

// InitCluster initializes the cluster, stores the join args, and returns the kubeconfig.
func (c *Core) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, masterSecret []byte, sshUsers []*pubproto.SSHUserKey) ([]byte, error) {
func (c *Core) InitCluster(
	ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, id attestationtypes.ID, masterSecret []byte, sshUsers []*pubproto.SSHUserKey,
) ([]byte, error) {
	c.zaplogger.Info("Initializing cluster")
	vpnIP, err := c.GetVPNIP()
	if err != nil {
@@ -37,7 +40,7 @@ func (c *Core) InitCluster(ctx context.Context, autoscalingNodeGroups []string,
			sshUsersMap[value.Username] = value.PublicKey
		}
	}
	if err := c.kube.InitCluster(ctx, autoscalingNodeGroups, cloudServiceAccountURI, vpnIP, masterSecret, sshUsersMap); err != nil {
	if err := c.kube.InitCluster(ctx, autoscalingNodeGroups, cloudServiceAccountURI, vpnIP, id, masterSecret, sshUsersMap); err != nil {
		c.zaplogger.Error("Initializing cluster failed", zap.Error(err))
		return nil, err
	}
@@ -89,7 +92,9 @@ func (c *Core) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDisc
// Cluster manages the overall cluster lifecycle (init, join).
type Cluster interface {
	// InitCluster bootstraps a new cluster with the current node being the master, returning the arguments required to join the cluster.
	InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, masterSecret []byte, sshUsers map[string]string) error
	InitCluster(
		ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, id attestationtypes.ID, masterSecret []byte, sshUsers map[string]string,
	) error
	// JoinCluster will join the current node to an existing cluster.
	JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, nodeVPNIP, certKey string, peerRole role.Role) error
	// GetKubeconfig reads the kubeconfig from the filesystem. Only succeeds after cluster is initialized.
@@ -106,7 +111,9 @@ type Cluster interface {
type ClusterFake struct{}

// InitCluster fakes bootstrapping a new cluster with the current node being the master, returning the arguments required to join the cluster.
func (c *ClusterFake) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, masterSecret []byte, sshUsers map[string]string) error {
func (c *ClusterFake) InitCluster(
	ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, id attestationtypes.ID, masterSecret []byte, sshUsers map[string]string,
) error {
	return nil
}

@@ -9,6 +9,7 @@ import (
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/internal/attestation/simulator"
	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
	"github.com/edgelesssys/constellation/internal/deploy/user"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/spf13/afero"
@@ -104,7 +105,8 @@ func TestInitCluster(t *testing.T) {
	core, err := NewCore(tc.vpn, tc.cluster, tc.metadata, nil, zapLogger, simulator.OpenSimulatedTPM, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
	require.NoError(err)

	kubeconfig, err := core.InitCluster(context.Background(), tc.autoscalingNodeGroups, "cloud-service-account-uri", tc.masterSecret, tc.sshUsers)
	id := attestationtypes.ID{Owner: []byte{0x1}, Cluster: []byte{0x2}}
	kubeconfig, err := core.InitCluster(context.Background(), tc.autoscalingNodeGroups, "cloud-service-account-uri", id, tc.masterSecret, tc.sshUsers)

	if tc.wantErr {
		assert.Error(err)
@@ -196,7 +198,9 @@ type clusterStub struct {
	inVpnIP string
}

func (c *clusterStub) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, vpnIP string, masterSecret []byte, sshUsers map[string]string) error {
func (c *clusterStub) InitCluster(
	ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, vpnIP string, id attestationtypes.ID, masterSecret []byte, sshUsers map[string]string,
) error {
	c.inAutoscalingNodeGroups = autoscalingNodeGroups
	c.inCloudServiceAccountURI = cloudServiceAccountURI
	c.inVpnIP = vpnIP
238 coordinator/kubernetes/k8sapi/resources/activation.go Normal file
@@ -0,0 +1,238 @@
package resources

import (
	"fmt"

	"github.com/edgelesssys/constellation/internal/constants"
	"github.com/edgelesssys/constellation/internal/secrets"
	apps "k8s.io/api/apps/v1"
	k8s "k8s.io/api/core/v1"
	rbac "k8s.io/api/rbac/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

const activationImage = "ghcr.io/edgelesssys/constellation/activation-service:latest"

type activationDaemonset struct {
	ClusterRole rbac.ClusterRole
	ClusterRoleBinding rbac.ClusterRoleBinding
	ConfigMap k8s.ConfigMap
	DaemonSet apps.DaemonSet
	ServiceAccount k8s.ServiceAccount
	Service k8s.Service
}

// NewActivationDaemonset returns a daemonset for the activation service.
func NewActivationDaemonset(csp, measurementsJSON, idJSON string) *activationDaemonset {
	return &activationDaemonset{
		ClusterRole: rbac.ClusterRole{
			TypeMeta: meta.TypeMeta{
				APIVersion: "rbac.authorization.k8s.io/v1",
				Kind: "ClusterRole",
			},
			ObjectMeta: meta.ObjectMeta{
				Name: "activation-service",
				Labels: map[string]string{
					"k8s-app": "activation-service",
				},
			},
			Rules: []rbac.PolicyRule{
				{
					APIGroups: []string{""},
					Resources: []string{"secrets"},
					Verbs: []string{"get", "list", "create"},
				},
			},
		},
		ClusterRoleBinding: rbac.ClusterRoleBinding{
			TypeMeta: meta.TypeMeta{
				APIVersion: "rbac.authorization.k8s.io/v1",
				Kind: "ClusterRoleBinding",
			},
			ObjectMeta: meta.ObjectMeta{
				Name: "activation-service",
			},
			RoleRef: rbac.RoleRef{
				APIGroup: "rbac.authorization.k8s.io",
				Kind: "ClusterRole",
				Name: "activation-service",
			},
			Subjects: []rbac.Subject{
				{
					Kind: "ServiceAccount",
					Name: "activation-service",
					Namespace: "kube-system",
				},
			},
		},
		DaemonSet: apps.DaemonSet{
			TypeMeta: meta.TypeMeta{
				APIVersion: "apps/v1",
				Kind: "DaemonSet",
			},
			ObjectMeta: meta.ObjectMeta{
				Name: "activation-service",
				Namespace: "kube-system",
				Labels: map[string]string{
					"k8s-app": "activation-service",
					"component": "activation-service",
					"kubernetes.io/cluster-service": "true",
				},
			},
			Spec: apps.DaemonSetSpec{
				Selector: &meta.LabelSelector{
					MatchLabels: map[string]string{
						"k8s-app": "activation-service",
					},
				},
				Template: k8s.PodTemplateSpec{
					ObjectMeta: meta.ObjectMeta{
						Labels: map[string]string{
							"k8s-app": "activation-service",
						},
					},
					Spec: k8s.PodSpec{
						PriorityClassName: "system-cluster-critical",
						ServiceAccountName: "activation-service",
						Tolerations: []k8s.Toleration{
							{
								Key: "CriticalAddonsOnly",
								Operator: k8s.TolerationOpExists,
							},
							{
								Key: "node-role.kubernetes.io/master",
								Operator: k8s.TolerationOpEqual,
								Value: "true",
								Effect: k8s.TaintEffectNoSchedule,
							},
							{
								Operator: k8s.TolerationOpExists,
								Effect: k8s.TaintEffectNoExecute,
							},
							{
								Operator: k8s.TolerationOpExists,
								Effect: k8s.TaintEffectNoSchedule,
							},
						},
						// Only run on control plane nodes
						NodeSelector: map[string]string{
							"node-role.kubernetes.io/master": "",
						},
						ImagePullSecrets: []k8s.LocalObjectReference{
							{
								Name: secrets.PullSecretName,
							},
						},
						Containers: []k8s.Container{
							{
								Name: "activation-service",
								Image: activationImage,
								Ports: []k8s.ContainerPort{
									{
										ContainerPort: 9090,
										Name: "tcp",
									},
								},
								SecurityContext: &k8s.SecurityContext{
									Privileged: func(b bool) *bool { return &b }(true),
								},
								Args: []string{
									fmt.Sprintf("--cloud-provider=%s", csp),
									fmt.Sprintf("--kms-endpoint=kms.kube-system:%d", constants.KMSPort),
									"--v=5",
								},
								VolumeMounts: []k8s.VolumeMount{
									{
										Name: "config",
										ReadOnly: true,
										MountPath: constants.ActivationBasePath,
									},
									{
										Name: "kubeadm",
										ReadOnly: true,
										MountPath: "/etc/kubernetes",
									},
								},
							},
						},
						Volumes: []k8s.Volume{
							{
								Name: "config",
								VolumeSource: k8s.VolumeSource{
									ConfigMap: &k8s.ConfigMapVolumeSource{
										LocalObjectReference: k8s.LocalObjectReference{
											Name: "activation-config",
										},
									},
								},
							},
							{
								Name: "kubeadm",
								VolumeSource: k8s.VolumeSource{
									HostPath: &k8s.HostPathVolumeSource{
										Path: "/etc/kubernetes",
									},
								},
							},
						},
					},
				},
			},
		},
		ServiceAccount: k8s.ServiceAccount{
			TypeMeta: meta.TypeMeta{
				APIVersion: "v1",
				Kind: "ServiceAccount",
			},
			ObjectMeta: meta.ObjectMeta{
				Name: "activation-service",
				Namespace: "kube-system",
			},
		},
		Service: k8s.Service{
			TypeMeta: meta.TypeMeta{
				APIVersion: "v1",
				Kind: "Service",
			},
			ObjectMeta: meta.ObjectMeta{
				Name: "activation-service",
				Namespace: "kube-system",
			},
			Spec: k8s.ServiceSpec{
				Type: k8s.ServiceTypeNodePort,
				Ports: []k8s.ServicePort{
					{
						Name: "grpc",
						Protocol: k8s.ProtocolTCP,
						Port: constants.ActivationServicePort,
						TargetPort: intstr.IntOrString{IntVal: constants.ActivationServicePort},
						NodePort: constants.ActivationServiceNodePort,
					},
				},
				Selector: map[string]string{
					"k8s-app": "activation-service",
				},
			},
		},
		ConfigMap: k8s.ConfigMap{
			TypeMeta: meta.TypeMeta{
				APIVersion: "v1",
				Kind: "ConfigMap",
			},
			ObjectMeta: meta.ObjectMeta{
				Name: "activation-config",
				Namespace: "kube-system",
			},
			Data: map[string]string{
				"measurements": measurementsJSON,
				"id": idJSON,
			},
		},
	}
}

// Marshal the daemonset using the Kubernetes resource marshaller.
func (a *activationDaemonset) Marshal() ([]byte, error) {
	return MarshalK8SResources(a)
}
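For orientation, a rough sketch of how this resource set is consumed later in this commit: the daemonset is built from the cloud provider name plus the measurements and ID JSON strings, and the marshaled manifests are applied through the cluster's kubectl client (`client` stands in for the `k8sapi.Client` used by `SetupActivationService`; the helper name is hypothetical):

```go
// Hypothetical wiring, mirroring KubeWrapper.setupActivationService further below.
func deployActivation(client k8sapi.Client, csp string, measurementsJSON, idJSON []byte) error {
	activationConfiguration := resources.NewActivationDaemonset(csp, string(measurementsJSON), string(idJSON))
	// Apply takes a resources.Marshaler; the boolean mirrors the call in SetupActivationService.
	return client.Apply(activationConfiguration, true)
}
```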
18 coordinator/kubernetes/k8sapi/resources/activation_test.go Normal file
@@ -0,0 +1,18 @@
package resources

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestNewActivationDaemonset(t *testing.T) {
	deployment := NewActivationDaemonset("csp", "measurementsJSON", "idJSON")
	deploymentYAML, err := deployment.Marshal()
	require.NoError(t, err)

	var recreated activationDaemonset
	require.NoError(t, UnmarshalK8SResources(deploymentYAML, &recreated))
	assert.Equal(t, deployment, &recreated)
}
@@ -1,16 +1,20 @@
package resources

import (
	"fmt"

	"github.com/edgelesssys/constellation/internal/constants"
	"github.com/edgelesssys/constellation/internal/secrets"
	apps "k8s.io/api/apps/v1"
	k8s "k8s.io/api/core/v1"
	rbac "k8s.io/api/rbac/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

type kmsDeployment struct {
	ServiceAccount k8s.ServiceAccount
	Service k8s.Service
	ClusterRole rbac.ClusterRole
	ClusterRoleBinding rbac.ClusterRoleBinding
	Deployment apps.Deployment
@@ -35,6 +39,30 @@ func NewKMSDeployment(masterSecret []byte) *kmsDeployment {
				Namespace: "kube-system",
			},
		},
		Service: k8s.Service{
			TypeMeta: meta.TypeMeta{
				APIVersion: "v1",
				Kind: "Service",
			},
			ObjectMeta: meta.ObjectMeta{
				Name: "kms",
				Namespace: "kube-system",
			},
			Spec: k8s.ServiceSpec{
				Type: k8s.ServiceTypeClusterIP,
				Ports: []k8s.ServicePort{
					{
						Name: "grpc",
						Protocol: k8s.ProtocolTCP,
						Port: constants.KMSPort,
						TargetPort: intstr.FromInt(constants.KMSPort),
					},
				},
				Selector: map[string]string{
					"k8s-app": "kms",
				},
			},
		},
		ClusterRole: rbac.ClusterRole{
			TypeMeta: meta.TypeMeta{
				APIVersion: "rbac.authorization.k8s.io/v1",
@@ -100,6 +128,31 @@ func NewKMSDeployment(masterSecret []byte) *kmsDeployment {
					},
				},
				Spec: k8s.PodSpec{
					PriorityClassName: "system-cluster-critical",
					Tolerations: []k8s.Toleration{
						{
							Key: "CriticalAddonsOnly",
							Operator: k8s.TolerationOpExists,
						},
						{
							Key: "node-role.kubernetes.io/master",
							Operator: k8s.TolerationOpEqual,
							Value: "true",
							Effect: k8s.TaintEffectNoSchedule,
						},
						{
							Operator: k8s.TolerationOpExists,
							Effect: k8s.TaintEffectNoExecute,
						},
						{
							Operator: k8s.TolerationOpExists,
							Effect: k8s.TaintEffectNoSchedule,
						},
					},
					// Only run on control plane nodes
					NodeSelector: map[string]string{
						"node-role.kubernetes.io/master": "",
					},
					ImagePullSecrets: []k8s.LocalObjectReference{
						{
							Name: secrets.PullSecretName,
@@ -126,6 +179,10 @@ func NewKMSDeployment(masterSecret []byte) *kmsDeployment {
						{
							Name: "kms",
							Image: kmsImage,
							Args: []string{
								fmt.Sprintf("--port=%d", constants.KMSPort),
								"--v=5",
							},
							VolumeMounts: []k8s.VolumeMount{
								{
									Name: "mastersecret",
@@ -232,6 +232,11 @@ func (k *KubernetesUtil) SetupAutoscaling(kubectl Client, clusterAutoscalerConfi
	return kubectl.Apply(clusterAutoscalerConfiguration, true)
}

// SetupActivationService deploys the Constellation node activation service.
func (k *KubernetesUtil) SetupActivationService(kubectl Client, activationServiceConfiguration resources.Marshaler) error {
	return kubectl.Apply(activationServiceConfiguration, true)
}

// SetupCloudControllerManager deploys the k8s cloud-controller-manager.
func (k *KubernetesUtil) SetupCloudControllerManager(kubectl Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error {
	if err := kubectl.Apply(configMaps, true); err != nil {
@@ -16,6 +16,7 @@ type clusterUtil interface {
	SetupPodNetwork(context.Context, k8sapi.SetupPodNetworkInput) error
	SetupAccessManager(kubectl k8sapi.Client, sshUsers resources.Marshaler) error
	SetupAutoscaling(kubectl k8sapi.Client, clusterAutoscalerConfiguration resources.Marshaler, secrets resources.Marshaler) error
	SetupActivationService(kubectl k8sapi.Client, activationServiceConfiguration resources.Marshaler) error
	SetupCloudControllerManager(kubectl k8sapi.Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error
	SetupCloudNodeManager(kubectl k8sapi.Client, cloudNodeManagerConfiguration resources.Marshaler) error
	SetupKMS(kubectl k8sapi.Client, kmsConfiguration resources.Marshaler) error
@@ -2,6 +2,7 @@ package kubernetes

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"
	"time"
@@ -10,6 +11,7 @@ import (
	"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi"
	"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
	"github.com/edgelesssys/constellation/coordinator/role"
	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
	"github.com/spf13/afero"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
@@ -27,36 +29,40 @@ type configurationProvider interface {

// KubeWrapper implements Cluster interface.
type KubeWrapper struct {
	cloudProvider string
	clusterUtil clusterUtil
	configProvider configurationProvider
	client k8sapi.Client
	kubeconfigReader configReader
	cloudControllerManager CloudControllerManager
	cloudNodeManager CloudNodeManager
	clusterAutoscaler ClusterAutoscaler
	providerMetadata ProviderMetadata
	cloudProvider string
	clusterUtil clusterUtil
	configProvider configurationProvider
	client k8sapi.Client
	kubeconfigReader configReader
	cloudControllerManager CloudControllerManager
	cloudNodeManager CloudNodeManager
	clusterAutoscaler ClusterAutoscaler
	providerMetadata ProviderMetadata
	initialMeasurementsJSON []byte
}

// New creates a new KubeWrapper with real values.
func New(cloudProvider string, clusterUtil clusterUtil, configProvider configurationProvider, client k8sapi.Client, cloudControllerManager CloudControllerManager,
	cloudNodeManager CloudNodeManager, clusterAutoscaler ClusterAutoscaler, providerMetadata ProviderMetadata,
	cloudNodeManager CloudNodeManager, clusterAutoscaler ClusterAutoscaler, providerMetadata ProviderMetadata, initialMeasurementsJSON []byte,
) *KubeWrapper {
	return &KubeWrapper{
		cloudProvider: cloudProvider,
		clusterUtil: clusterUtil,
		configProvider: configProvider,
		client: client,
		kubeconfigReader: &KubeconfigReader{fs: afero.Afero{Fs: afero.NewOsFs()}},
		cloudControllerManager: cloudControllerManager,
		cloudNodeManager: cloudNodeManager,
		clusterAutoscaler: clusterAutoscaler,
		providerMetadata: providerMetadata,
		cloudProvider: cloudProvider,
		clusterUtil: clusterUtil,
		configProvider: configProvider,
		client: client,
		kubeconfigReader: &KubeconfigReader{fs: afero.Afero{Fs: afero.NewOsFs()}},
		cloudControllerManager: cloudControllerManager,
		cloudNodeManager: cloudNodeManager,
		clusterAutoscaler: clusterAutoscaler,
		providerMetadata: providerMetadata,
		initialMeasurementsJSON: initialMeasurementsJSON,
	}
}

// InitCluster initializes a new Kubernetes cluster and applies pod network provider.
func (k *KubeWrapper) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, masterSecret []byte, sshUsers map[string]string) error {
func (k *KubeWrapper) InitCluster(
	ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, id attestationtypes.ID, masterSecret []byte, sshUsers map[string]string,
) error {
	// TODO: k8s version should be user input
	if err := k.clusterUtil.InstallComponents(context.TODO(), "1.23.6"); err != nil {
		return err
@@ -141,6 +147,10 @@ func (k *KubeWrapper) InitCluster(ctx context.Context, autoscalingNodeGroups []s
		return fmt.Errorf("setup of kms failed: %w", err)
	}

	if err := k.setupActivationService(k.cloudProvider, k.initialMeasurementsJSON, id); err != nil {
		return fmt.Errorf("setting up activation service failed: %w", err)
	}

	if err := k.setupCCM(context.TODO(), vpnIP, subnetworkPodCIDR, cloudServiceAccountURI, instance); err != nil {
		return fmt.Errorf("setting up cloud controller manager failed: %w", err)
	}
@@ -240,6 +250,17 @@ func (k *KubeWrapper) GetJoinToken(ctx context.Context, ttl time.Duration) (*kub
	return k.clusterUtil.CreateJoinToken(ctx, ttl)
}

func (k *KubeWrapper) setupActivationService(csp string, measurementsJSON []byte, id attestationtypes.ID) error {
	idJSON, err := json.Marshal(id)
	if err != nil {
		return err
	}

	activationConfiguration := resources.NewActivationDaemonset(csp, string(measurementsJSON), string(idJSON)) // TODO: set kms endpoint

	return k.clusterUtil.SetupActivationService(k.client, activationConfiguration)
}

func (k *KubeWrapper) setupCCM(ctx context.Context, vpnIP, subnetworkPodCIDR, cloudServiceAccountURI string, instance cloudtypes.Instance) error {
	if !k.cloudControllerManager.Supported() {
		return nil
@@ -11,6 +11,7 @@ import (
	"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi"
	"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
	"github.com/edgelesssys/constellation/coordinator/role"
	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/goleak"
@@ -172,6 +173,17 @@ func TestInitCluster(t *testing.T) {
			ClusterAutoscaler: &stubClusterAutoscaler{},
			wantErr: true,
		},
		"kubeadm init fails when setting up the activation service": {
			clusterUtil: stubClusterUtil{setupActivationServiceError: someErr},
			kubeconfigReader: &stubKubeconfigReader{
				Kubeconfig: []byte("someKubeconfig"),
			},
			providerMetadata: &stubProviderMetadata{},
			CloudControllerManager: &stubCloudControllerManager{SupportedResp: true},
			CloudNodeManager: &stubCloudNodeManager{},
			ClusterAutoscaler: &stubClusterAutoscaler{},
			wantErr: true,
		},
		"kubeadm init fails when setting the cloud controller manager": {
			clusterUtil: stubClusterUtil{setupCloudControllerManagerError: someErr},
			kubeconfigReader: &stubKubeconfigReader{
@@ -244,7 +256,7 @@ func TestInitCluster(t *testing.T) {
		client: &tc.kubeCTL,
		kubeconfigReader: tc.kubeconfigReader,
	}
	err := kube.InitCluster(context.Background(), autoscalingNodeGroups, serviceAccountUri, coordinatorVPNIP, masterSecret, nil)
	err := kube.InitCluster(context.Background(), autoscalingNodeGroups, serviceAccountUri, coordinatorVPNIP, attestationtypes.ID{}, masterSecret, nil)

	if tc.wantErr {
		assert.Error(err)
@@ -498,6 +510,7 @@ type stubClusterUtil struct {
	initClusterErr error
	setupPodNetworkErr error
	setupAutoscalingError error
	setupActivationServiceError error
	setupCloudControllerManagerError error
	setupCloudNodeManagerError error
	setupKMSError error
@@ -529,6 +542,10 @@ func (s *stubClusterUtil) SetupAutoscaling(kubectl k8sapi.Client, clusterAutosca
	return s.setupAutoscalingError
}

func (s *stubClusterUtil) SetupActivationService(kubectl k8sapi.Client, activationServiceConfiguration resources.Marshaler) error {
	return s.setupActivationServiceError
}

func (s *stubClusterUtil) SetupCloudControllerManager(kubectl k8sapi.Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error {
	return s.setupCloudControllerManagerError
}
@@ -12,6 +12,7 @@ import (
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/state"
	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
	"github.com/edgelesssys/constellation/internal/deploy/ssh"
	"github.com/edgelesssys/constellation/state/keyservice/keyproto"
	"go.uber.org/zap"
@@ -101,7 +102,8 @@ func (a *API) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinatorRequest, s
	}

	logToCLI("Initializing Kubernetes ...")
	kubeconfig, err := a.core.InitCluster(context.TODO(), in.AutoscalingNodeGroups, in.CloudServiceAccountUri, in.MasterSecret, in.SshUserKeys)
	id := attestationtypes.ID{Owner: ownerID, Cluster: clusterID}
	kubeconfig, err := a.core.InitCluster(context.TODO(), in.AutoscalingNodeGroups, in.CloudServiceAccountUri, id, in.MasterSecret, in.SshUserKeys)
	if err != nil {
		return status.Errorf(codes.Internal, "initializing Kubernetes cluster failed: %v", err)
	}
@@ -7,6 +7,7 @@ import (
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/state"
	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
	"github.com/edgelesssys/constellation/internal/deploy/ssh"
	kms "github.com/edgelesssys/constellation/kms/server/setup"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
@@ -40,6 +41,8 @@ type Core interface {

	CreateSSHUsers([]ssh.UserKey) error

	InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, masterSecret []byte, sshUserKeys []*pubproto.SSHUserKey) ([]byte, error)
	InitCluster(
		ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, id attestationtypes.ID, masterSecret []byte, sshUserKeys []*pubproto.SSHUserKey,
	) ([]byte, error)
	JoinCluster(ctx context.Context, joinToken *kubeadm.BootstrapTokenDiscovery, certificateKey string, role role.Role) error
}
@@ -9,6 +9,7 @@ import (
	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/state"
	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
	"github.com/edgelesssys/constellation/internal/deploy/ssh"
	"github.com/edgelesssys/constellation/internal/deploy/user"
	kms "github.com/edgelesssys/constellation/kms/server/setup"
@@ -123,7 +124,9 @@ func (c *fakeCore) UpdatePeers(peers []peer.Peer) error {
	return c.UpdatePeersErr
}

func (c *fakeCore) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, masterSecret []byte, sshUsers []*pubproto.SSHUserKey) ([]byte, error) {
func (c *fakeCore) InitCluster(
	ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, id attestationtypes.ID, masterSecret []byte, sshUsers []*pubproto.SSHUserKey,
) ([]byte, error) {
	c.autoscalingNodeGroups = autoscalingNodeGroups
	return c.kubeconfig, nil
}
11 go.mod
@@ -116,15 +116,9 @@ require (
	k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
)

require (
	code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c // indirect
	github.com/aws/smithy-go v1.11.2 // indirect
	github.com/gofrs/uuid v4.0.0+incompatible // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
)

require (
	cloud.google.com/go v0.100.2 // indirect
	code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.1 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.2.1 // indirect
	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
@@ -149,6 +143,7 @@ require (
	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.2 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.11.2 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.16.2 // indirect
	github.com/aws/smithy-go v1.11.2 // indirect
	github.com/benbjohnson/clock v1.3.0 // indirect
	github.com/containerd/cgroups v1.0.3 // indirect
	github.com/containerd/containerd v1.6.0 // indirect
@@ -165,6 +160,7 @@ require (
	github.com/go-openapi/jsonreference v0.19.6 // indirect
	github.com/go-openapi/swag v0.21.1 // indirect
	github.com/godbus/dbus/v5 v5.0.6 // indirect
	github.com/gofrs/uuid v4.0.0+incompatible // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
	github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
@@ -176,6 +172,7 @@ require (
	github.com/google/go-cmp v0.5.7 // indirect
	github.com/google/go-tspi v0.3.0 // indirect
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/icholy/replace v0.5.0
	github.com/imdario/mergo v0.3.12 // indirect
	github.com/inconshreveable/mousetrap v1.0.0 // indirect
3 go.sum
@@ -604,8 +604,6 @@ github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
@@ -1919,7 +1917,6 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220207234003-57398862261d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5 h1:y/woIyUBFbpQGKS0u1aHF/40WUDnek3fPOyD08H5Vng=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
7 internal/attestation/types/types.go Normal file
@@ -0,0 +1,7 @@
package attestationtypes

// ID holds the identifiers of a node.
type ID struct {
	Cluster []byte `json:"cluster"`
	Owner []byte `json:"owner"`
}
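Both fields are byte slices, so the `id` entry written to the activation ConfigMap is JSON with base64-encoded values. A small sketch of what `json.Marshal` produces for this type (placeholder IDs):

```go
package main

import (
	"encoding/json"
	"fmt"

	attestationtypes "github.com/edgelesssys/constellation/internal/attestation/types"
)

func main() {
	id := attestationtypes.ID{Owner: []byte{0x01}, Cluster: []byte{0x02}}
	b, _ := json.Marshal(id)
	fmt.Println(string(b)) // {"cluster":"Ag==","owner":"AQ=="}
}
```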
@@ -22,11 +22,14 @@ const (
	// Ports.
	//

	CoordinatorPort = 9000
	EnclaveSSHPort = 2222
	SSHPort = 22
	WireguardPort = 51820
	NVMEOverTCPPort = 8009
	ActivationServicePort = 9090
	ActivationServiceNodePort = 30090
	KMSPort = 9000
	CoordinatorPort = 9000
	EnclaveSSHPort = 2222
	SSHPort = 22
	WireguardPort = 51820
	NVMEOverTCPPort = 8009
	// Default NodePort Range
	// https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
	NodePortFrom = 30000
@@ -46,8 +49,9 @@ const (
	CoreOSAdminConfFilename = "/etc/kubernetes/admin.conf"

	// Filenames for the Activation service.
	ActivationMeasurementsFilename = "/var/config/measurements"
	ActivationIDFilename = "/var/config/id"
	ActivationBasePath = "/var/config"
	ActivationMeasurementsFilename = "measurements"
	ActivationIDFilename = "id"

	//
	// Cryptographic constants.
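With the base path now split from the file names, callers compose the full path at runtime, which is what the `filepath.Join` calls added throughout this commit do. A minimal sketch of that composition:

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/edgelesssys/constellation/internal/constants"
)

func main() {
	// The "activation-config" ConfigMap is mounted at ActivationBasePath,
	// so each key becomes a file below /var/config.
	measurementsPath := filepath.Join(constants.ActivationBasePath, constants.ActivationMeasurementsFilename)
	idPath := filepath.Join(constants.ActivationBasePath, constants.ActivationIDFilename)
	fmt.Println(measurementsPath, idPath) // /var/config/measurements /var/config/id
}
```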
31 internal/grpc/grpc_klog/grpc_klog.go Normal file
@@ -0,0 +1,31 @@
// grpc_klog provides a logging interceptor for the klog logger.
package grpc_klog

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/peer"
	"k8s.io/klog/v2"
)

// LogGRPC writes a log with the name of every gRPC call or error it receives.
// Request parameters or responses are NOT logged.
func LogGRPC(level klog.Level) func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		// log the request's method name
		var addr string
		peer, ok := peer.FromContext(ctx)
		if ok {
			addr = peer.Addr.String()
		}
		klog.V(level).Infof("GRPC call from peer: %q: %s", addr, info.FullMethod)

		// log errors, if any
		resp, err := handler(ctx, req)
		if err != nil {
			klog.Errorf("GRPC error: %v", err)
		}
		return resp, err
	}
}
@@ -5,53 +5,62 @@ import (
	"errors"
	"flag"
	"fmt"
	"log"
	"net"

	"github.com/edgelesssys/constellation/internal/constants"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/edgelesssys/constellation/internal/grpc/grpc_klog"
	"github.com/edgelesssys/constellation/kms/server/kmsapi"
	"github.com/edgelesssys/constellation/kms/server/kmsapi/kmsproto"
	"github.com/edgelesssys/constellation/kms/server/setup"
	"github.com/spf13/afero"
	"go.uber.org/zap"
	"k8s.io/klog/v2"

	"google.golang.org/grpc"
)

func main() {
	port := flag.String("p", "9000", "Port gRPC server listens on")
	port := flag.String("port", "9000", "Port gRPC server listens on")
	masterSecretPath := flag.String("master-secret", "/constellation/constellation-mastersecret.base64", "Path to the Constellation master secret")

	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	klog.V(2).Infof("\nConstellation Key Management Service\nVersion: %s", constants.VersionInfo)

	masterKey, err := readMainSecret(*masterSecretPath)
	if err != nil {
		log.Fatalf("Failed to read master secret: %v", err)
		klog.Exitf("Failed to read master secret: %v", err)
	}

	conKMS, err := setup.SetUpKMS(context.Background(), setup.NoStoreURI, setup.ClusterKMSURI)
	if err != nil {
		log.Fatalf("Failed to setup KMS: %v", err)
		klog.Exitf("Failed to setup KMS: %v", err)
	}

	if err := conKMS.CreateKEK(context.Background(), "Constellation", masterKey); err != nil {
		log.Fatalf("Failed to create KMS KEK from MasterKey: %v", err)
		klog.Exitf("Failed to create KMS KEK from MasterKey: %v", err)
	}

	lis, err := net.Listen("tcp", net.JoinHostPort("0.0.0.0", *port))
	lis, err := net.Listen("tcp", net.JoinHostPort("", *port))
	if err != nil {
		log.Fatalf("Failed to listen: %v", err)
		klog.Exitf("Failed to listen: %v", err)
	}

	srv := kmsapi.New(&zap.Logger{}, conKMS)

	// TODO: Launch server with aTLS to allow attestation for clients.
	grpcServer := grpc.NewServer()
	grpcServer := grpc.NewServer(
		grpc.UnaryInterceptor(grpc_klog.LogGRPC(2)),
	)

	kmsproto.RegisterAPIServer(grpcServer, srv)

	klog.V(2).Infof("Starting key management service on %s", lis.Addr().String())
	if err := grpcServer.Serve(lis); err != nil {
		log.Fatalf("Failed to serve: %s", err)
		klog.Exitf("Failed to serve: %s", err)
	}
}

@@ -9,6 +9,7 @@ import (
	"go.uber.org/zap"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"k8s.io/klog/v2"
)

// API resembles an encryption key management api server through logger, CloudKMS and proto-unimplemented server.
@@ -30,16 +31,19 @@ func New(logger *zap.Logger, conKMS kms.CloudKMS) *API {
func (a *API) GetDataKey(ctx context.Context, in *kmsproto.GetDataKeyRequest) (*kmsproto.GetDataKeyResponse, error) {
	// Error on 0 key length
	if in.Length == 0 {
		klog.Error("GetDataKey: requested key length is zero")
		return nil, status.Error(codes.InvalidArgument, "can't derive key with length zero")
	}

	// Error on empty DataKeyId
	if in.DataKeyId == "" {
		return nil, status.Error(codes.InvalidArgument, "no data key id specified")
		klog.Error("GetDataKey: no data key ID specified")
		return nil, status.Error(codes.InvalidArgument, "no data key ID specified")
	}

	key, err := a.conKMS.GetDEK(ctx, "Constellation", "key-"+in.DataKeyId, int(in.Length))
	if err != nil {
		klog.Errorf("GetDataKey: failed to get data key: %v", err)
		return nil, status.Errorf(codes.Internal, "%v", err)
	}
	return &kmsproto.GetDataKeyResponse{DataKey: key}, nil