From e363f03240b6f3d420916c06f3405a53ffe8a51a Mon Sep 17 00:00:00 2001 From: Otto Bittner Date: Wed, 2 Nov 2022 17:47:10 +0100 Subject: [PATCH] AB#2582: deploy CNM via Helm (#423) --- bootstrapper/cmd/bootstrapper/main.go | 9 +- bootstrapper/internal/helm/client.go | 2 - .../internal/kubernetes/cloud_provider.go | 24 --- .../internal/kubernetes/k8sapi/k8sutil.go | 19 -- .../k8sapi/resources/cloudnodemanager.go | 187 ------------------ .../k8sapi/resources/cloudnodemanager_test.go | 28 --- bootstrapper/internal/kubernetes/k8sutil.go | 2 - .../internal/kubernetes/kubernetes.go | 27 +-- .../internal/kubernetes/kubernetes_test.go | 53 ++--- .../constellation-services/Chart.yaml | 4 + .../charts/cnm/.helmignore | 23 +++ .../charts/cnm/Chart.yaml | 5 + .../charts/cnm/templates/azure-daemonset.yaml | 57 ++++++ .../charts/cnm/templates/clusterrole.yaml | 25 +++ .../cnm/templates/clusterrolebinding.yaml | 16 ++ .../charts/cnm/templates/serviceaccount.yaml | 9 + .../charts/cnm/values.schema.json | 15 ++ .../charts/cnm/values.yaml | 0 cli/internal/helm/loader.go | 11 +- cli/internal/helm/loader_test.go | 9 +- .../charts/cnm/templates/azure-daemonset.yaml | 57 ++++++ .../charts/cnm/templates/clusterrole.yaml | 25 +++ .../cnm/templates/clusterrolebinding.yaml | 16 ++ .../charts/cnm/templates/serviceaccount.yaml | 9 + 24 files changed, 297 insertions(+), 335 deletions(-) delete mode 100644 bootstrapper/internal/kubernetes/k8sapi/resources/cloudnodemanager.go delete mode 100644 bootstrapper/internal/kubernetes/k8sapi/resources/cloudnodemanager_test.go create mode 100644 cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/.helmignore create mode 100644 cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/Chart.yaml create mode 100644 cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/azure-daemonset.yaml create mode 100644 cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/clusterrole.yaml create 
mode 100644 cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/clusterrolebinding.yaml create mode 100644 cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/serviceaccount.yaml create mode 100644 cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/values.schema.json create mode 100644 cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/values.yaml create mode 100644 cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/azure-daemonset.yaml create mode 100644 cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/clusterrole.yaml create mode 100644 cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/clusterrolebinding.yaml create mode 100644 cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/serviceaccount.yaml diff --git a/bootstrapper/cmd/bootstrapper/main.go b/bootstrapper/cmd/bootstrapper/main.go index 590392b2a..974786a2a 100644 --- a/bootstrapper/cmd/bootstrapper/main.go +++ b/bootstrapper/cmd/bootstrapper/main.go @@ -104,11 +104,10 @@ func main() { log.With(zap.Error(err)).Fatalf("Failed to set up cloud logger") } - nodeManager := &awscloud.CloudNodeManager{} cloudControllerManager := &awscloud.CloudControllerManager{} clusterInitJoiner = kubernetes.New( "aws", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.New(), cloudControllerManager, - nodeManager, &gcpcloud.Autoscaler{}, metadata, pcrsJSON, helmClient, + &gcpcloud.Autoscaler{}, metadata, pcrsJSON, helmClient, ) openTPM = vtpm.OpenVTPM fs = afero.NewOsFs() @@ -145,7 +144,7 @@ func main() { } clusterInitJoiner = kubernetes.New( "gcp", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.New(), cloudControllerManager, - &gcpcloud.CloudNodeManager{}, &gcpcloud.Autoscaler{}, metadata, pcrsJSON, helmClient, + &gcpcloud.Autoscaler{}, metadata, pcrsJSON, helmClient, ) openTPM = vtpm.OpenVTPM fs = 
afero.NewOsFs() @@ -179,7 +178,7 @@ func main() { } clusterInitJoiner = kubernetes.New( "azure", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.New(), azurecloud.NewCloudControllerManager(metadata), - &azurecloud.CloudNodeManager{}, &azurecloud.Autoscaler{}, metadata, pcrsJSON, helmClient, + &azurecloud.Autoscaler{}, metadata, pcrsJSON, helmClient, ) openTPM = vtpm.OpenVTPM @@ -201,7 +200,7 @@ func main() { } clusterInitJoiner = kubernetes.New( "qemu", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.New(), &qemucloud.CloudControllerManager{}, - &qemucloud.CloudNodeManager{}, &qemucloud.Autoscaler{}, metadata, pcrsJSON, helmClient, + &qemucloud.Autoscaler{}, metadata, pcrsJSON, helmClient, ) metadataAPI = metadata diff --git a/bootstrapper/internal/helm/client.go b/bootstrapper/internal/helm/client.go index 63c50186e..c47fcc8d3 100644 --- a/bootstrapper/internal/helm/client.go +++ b/bootstrapper/internal/helm/client.go @@ -58,8 +58,6 @@ func (h *Client) InstallConstellationServices(ctx context.Context, release helm. h.ReleaseName = release.ReleaseName h.Wait = release.Wait h.Timeout = timeout - // update dependencies - unsure if necessary for local deps. 
- h.DependencyUpdate = true mergedVals := mergeMaps(release.Values, extraVals) diff --git a/bootstrapper/internal/kubernetes/cloud_provider.go b/bootstrapper/internal/kubernetes/cloud_provider.go index a10243635..e48882363 100644 --- a/bootstrapper/internal/kubernetes/cloud_provider.go +++ b/bootstrapper/internal/kubernetes/cloud_provider.go @@ -187,30 +187,6 @@ func (m *stubCloudControllerManager) Supported() bool { return m.SupportedResp } -type stubCloudNodeManager struct { - SupportedResp bool - - ImageResp string - PathResp string - ExtraArgsResp []string -} - -func (m *stubCloudNodeManager) Image(k8sVersion versions.ValidK8sVersion) (string, error) { - return m.ImageResp, nil -} - -func (m *stubCloudNodeManager) Path() string { - return m.PathResp -} - -func (m *stubCloudNodeManager) ExtraArgs() []string { - return m.ExtraArgsResp -} - -func (m *stubCloudNodeManager) Supported() bool { - return m.SupportedResp -} - type stubClusterAutoscaler struct { SupportedResp bool } diff --git a/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go b/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go index 854b80fa2..c6f1ffd0e 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go +++ b/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go @@ -328,25 +328,6 @@ func (k *KubernetesUtil) SetupGCPGuestAgent(kubectl Client, guestAgentDaemonset return kubectl.Apply(guestAgentDaemonset, true) } -// SetupCloudControllerManager deploys the k8s cloud-controller-manager. 
-func (k *KubernetesUtil) SetupCloudControllerManager(kubectl Client, cloudControllerManagerConfiguration kubernetes.Marshaler, configMaps kubernetes.Marshaler, secrets kubernetes.Marshaler) error { - if err := kubectl.Apply(configMaps, true); err != nil { - return fmt.Errorf("applying ccm ConfigMaps: %w", err) - } - if err := kubectl.Apply(secrets, true); err != nil { - return fmt.Errorf("applying ccm Secrets: %w", err) - } - if err := kubectl.Apply(cloudControllerManagerConfiguration, true); err != nil { - return fmt.Errorf("applying ccm: %w", err) - } - return nil -} - -// SetupCloudNodeManager deploys the k8s cloud-node-manager. -func (k *KubernetesUtil) SetupCloudNodeManager(kubectl Client, cloudNodeManagerConfiguration kubernetes.Marshaler) error { - return kubectl.Apply(cloudNodeManagerConfiguration, true) -} - // SetupAccessManager deploys the constellation-access-manager for deploying SSH keys on control-plane & worker nodes. func (k *KubernetesUtil) SetupAccessManager(kubectl Client, accessManagerConfiguration kubernetes.Marshaler) error { return kubectl.Apply(accessManagerConfiguration, true) diff --git a/bootstrapper/internal/kubernetes/k8sapi/resources/cloudnodemanager.go b/bootstrapper/internal/kubernetes/k8sapi/resources/cloudnodemanager.go deleted file mode 100644 index 3b6749a04..000000000 --- a/bootstrapper/internal/kubernetes/k8sapi/resources/cloudnodemanager.go +++ /dev/null @@ -1,187 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: AGPL-3.0-only -*/ - -package resources - -import ( - "github.com/edgelesssys/constellation/v2/internal/kubernetes" - apps "k8s.io/api/apps/v1" - k8s "k8s.io/api/core/v1" - rbac "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/resource" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type CloudNodeManagerDeployment struct { - ServiceAccount k8s.ServiceAccount - ClusterRole rbac.ClusterRole - ClusterRoleBinding rbac.ClusterRoleBinding - DaemonSet apps.DaemonSet -} - -// 
NewDefaultCloudNodeManagerDeployment creates a new *cloudNodeManagerDeployment, customized for the CSP. -func NewDefaultCloudNodeManagerDeployment(image, path string, extraArgs []string) *CloudNodeManagerDeployment { - command := []string{ - path, - "--node-name=$(NODE_NAME)", - } - command = append(command, extraArgs...) - return &CloudNodeManagerDeployment{ - ServiceAccount: k8s.ServiceAccount{ - TypeMeta: meta.TypeMeta{ - APIVersion: "v1", - Kind: "ServiceAccount", - }, - ObjectMeta: meta.ObjectMeta{ - Name: "cloud-node-manager", - Namespace: "kube-system", - Labels: map[string]string{ - "k8s-app": "cloud-node-manager", - "kubernetes.io/cluster-service": "true", - "addonmanager.kubernetes.io/mode": "Reconcile", - }, - }, - }, - ClusterRole: rbac.ClusterRole{ - TypeMeta: meta.TypeMeta{ - APIVersion: "rbac.authorization.k8s.io/v1", - Kind: "ClusterRole", - }, - ObjectMeta: meta.ObjectMeta{ - Name: "cloud-node-manager", - Labels: map[string]string{ - "k8s-app": "cloud-node-manager", - "kubernetes.io/cluster-service": "true", - "addonmanager.kubernetes.io/mode": "Reconcile", - }, - }, - Rules: []rbac.PolicyRule{ - { - APIGroups: []string{""}, - Resources: []string{"nodes"}, - Verbs: []string{"watch", "list", "get", "update", "patch"}, - }, - { - APIGroups: []string{""}, - Resources: []string{"nodes/status"}, - Verbs: []string{"patch"}, - }, - }, - }, - ClusterRoleBinding: rbac.ClusterRoleBinding{ - TypeMeta: meta.TypeMeta{ - APIVersion: "rbac.authorization.k8s.io/v1", - Kind: "ClusterRoleBinding", - }, - ObjectMeta: meta.ObjectMeta{ - Name: "cloud-node-manager", - Labels: map[string]string{ - "k8s-app": "cloud-node-manager", - "kubernetes.io/cluster-service": "true", - "addonmanager.kubernetes.io/mode": "Reconcile", - }, - }, - RoleRef: rbac.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: "cloud-node-manager", - }, - Subjects: []rbac.Subject{ - { - Kind: "ServiceAccount", - Name: "cloud-node-manager", - Namespace: "kube-system", - }, 
- }, - }, - DaemonSet: apps.DaemonSet{ - TypeMeta: meta.TypeMeta{ - APIVersion: "apps/v1", - Kind: "DaemonSet", - }, - ObjectMeta: meta.ObjectMeta{ - Name: "cloud-node-manager", - Namespace: "kube-system", - Labels: map[string]string{ - "component": "cloud-node-manager", - "kubernetes.io/cluster-service": "true", - "addonmanager.kubernetes.io/mode": "Reconcile", - }, - }, - Spec: apps.DaemonSetSpec{ - Selector: &meta.LabelSelector{ - MatchLabels: map[string]string{"k8s-app": "cloud-node-manager"}, - }, - Template: k8s.PodTemplateSpec{ - ObjectMeta: meta.ObjectMeta{ - Labels: map[string]string{"k8s-app": "cloud-node-manager"}, - Annotations: map[string]string{"cluster-autoscaler.kubernetes.io/daemonset-pod": "true"}, - }, - Spec: k8s.PodSpec{ - PriorityClassName: "system-node-critical", - ServiceAccountName: "cloud-node-manager", - HostNetwork: true, - NodeSelector: map[string]string{"kubernetes.io/os": "linux"}, - Tolerations: []k8s.Toleration{ - { - Key: "CriticalAddonsOnly", - Operator: k8s.TolerationOpExists, - }, - { - Key: "node-role.kubernetes.io/master", - Operator: k8s.TolerationOpEqual, - Value: "true", - Effect: k8s.TaintEffectNoSchedule, - }, - { - Key: "node-role.kubernetes.io/control-plane", - Operator: k8s.TolerationOpExists, - Effect: k8s.TaintEffectNoSchedule, - }, - { - Operator: k8s.TolerationOpExists, - Effect: k8s.TaintEffectNoExecute, - }, - { - Operator: k8s.TolerationOpExists, - Effect: k8s.TaintEffectNoSchedule, - }, - }, - Containers: []k8s.Container{ - { - Name: "cloud-node-manager", - Image: image, - ImagePullPolicy: k8s.PullIfNotPresent, - Command: command, - Env: []k8s.EnvVar{ - { - Name: "NODE_NAME", - ValueFrom: &k8s.EnvVarSource{ - FieldRef: &k8s.ObjectFieldSelector{ - FieldPath: "spec.nodeName", - }, - }, - }, - }, - Resources: k8s.ResourceRequirements{ - Requests: k8s.ResourceList{ - k8s.ResourceCPU: resource.MustParse("50m"), - k8s.ResourceMemory: resource.MustParse("50Mi"), - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -// 
Marshal marshals the cloud-node-manager deployment as YAML documents. -func (c *CloudNodeManagerDeployment) Marshal() ([]byte, error) { - return kubernetes.MarshalK8SResources(c) -} diff --git a/bootstrapper/internal/kubernetes/k8sapi/resources/cloudnodemanager_test.go b/bootstrapper/internal/kubernetes/k8sapi/resources/cloudnodemanager_test.go deleted file mode 100644 index fefcaeda5..000000000 --- a/bootstrapper/internal/kubernetes/k8sapi/resources/cloudnodemanager_test.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: AGPL-3.0-only -*/ - -package resources - -import ( - "testing" - - "github.com/edgelesssys/constellation/v2/internal/kubernetes" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestCloudNodeManagerMarshalUnmarshal(t *testing.T) { - require := require.New(t) - assert := assert.New(t) - - cloudNodeManagerDepl := NewDefaultCloudNodeManagerDeployment("image", "path", []string{}) - data, err := cloudNodeManagerDepl.Marshal() - require.NoError(err) - - var recreated CloudNodeManagerDeployment - require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated)) - assert.Equal(cloudNodeManagerDepl, &recreated) -} diff --git a/bootstrapper/internal/kubernetes/k8sutil.go b/bootstrapper/internal/kubernetes/k8sutil.go index cf088c936..6fb54ac3e 100644 --- a/bootstrapper/internal/kubernetes/k8sutil.go +++ b/bootstrapper/internal/kubernetes/k8sutil.go @@ -24,8 +24,6 @@ type clusterUtil interface { JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, log *logger.Logger) error SetupAccessManager(kubectl k8sapi.Client, sshUsers kubernetes.Marshaler) error SetupAutoscaling(kubectl k8sapi.Client, clusterAutoscalerConfiguration kubernetes.Marshaler, secrets kubernetes.Marshaler) error - SetupCloudControllerManager(kubectl k8sapi.Client, cloudControllerManagerConfiguration kubernetes.Marshaler, configMaps kubernetes.Marshaler, 
secrets kubernetes.Marshaler) error - SetupCloudNodeManager(kubectl k8sapi.Client, cloudNodeManagerConfiguration kubernetes.Marshaler) error SetupKonnectivity(kubectl k8sapi.Client, konnectivityAgentsDaemonSet kubernetes.Marshaler) error SetupVerificationService(kubectl k8sapi.Client, verificationServiceConfiguration kubernetes.Marshaler) error SetupGCPGuestAgent(kubectl k8sapi.Client, gcpGuestAgentConfiguration kubernetes.Marshaler) error diff --git a/bootstrapper/internal/kubernetes/kubernetes.go b/bootstrapper/internal/kubernetes/kubernetes.go index b2e82bafa..6a94c5ca6 100644 --- a/bootstrapper/internal/kubernetes/kubernetes.go +++ b/bootstrapper/internal/kubernetes/kubernetes.go @@ -54,7 +54,6 @@ type KubeWrapper struct { client k8sapi.Client kubeconfigReader configReader cloudControllerManager CloudControllerManager - cloudNodeManager CloudNodeManager clusterAutoscaler ClusterAutoscaler providerMetadata ProviderMetadata initialMeasurementsJSON []byte @@ -63,7 +62,7 @@ type KubeWrapper struct { // New creates a new KubeWrapper with real values. 
func New(cloudProvider string, clusterUtil clusterUtil, configProvider configurationProvider, client k8sapi.Client, cloudControllerManager CloudControllerManager, - cloudNodeManager CloudNodeManager, clusterAutoscaler ClusterAutoscaler, providerMetadata ProviderMetadata, initialMeasurementsJSON []byte, helmClient helmClient, + clusterAutoscaler ClusterAutoscaler, providerMetadata ProviderMetadata, initialMeasurementsJSON []byte, helmClient helmClient, ) *KubeWrapper { return &KubeWrapper{ cloudProvider: cloudProvider, @@ -73,7 +72,6 @@ func New(cloudProvider string, clusterUtil clusterUtil, configProvider configura client: client, kubeconfigReader: &KubeconfigReader{fs: afero.Afero{Fs: afero.NewOsFs()}}, cloudControllerManager: cloudControllerManager, - cloudNodeManager: cloudNodeManager, clusterAutoscaler: clusterAutoscaler, providerMetadata: providerMetadata, initialMeasurementsJSON: initialMeasurementsJSON, @@ -210,10 +208,6 @@ func (k *KubeWrapper) InitCluster( return nil, fmt.Errorf("failed to setup internal ConfigMap: %w", err) } - if err := k.setupCloudNodeManager(k8sVersion); err != nil { - return nil, fmt.Errorf("setting up cloud node manager: %w", err) - } - if err := k.setupClusterAutoscaler(instance, cloudServiceAccountURI, k8sVersion); err != nil { return nil, fmt.Errorf("setting up cluster autoscaler: %w", err) } @@ -322,25 +316,6 @@ func (k *KubeWrapper) GetKubeconfig() ([]byte, error) { return k.kubeconfigReader.ReadKubeconfig() } -func (k *KubeWrapper) setupCloudNodeManager(k8sVersion versions.ValidK8sVersion) error { - if !k.cloudNodeManager.Supported() { - return nil - } - nodeManagerImage, err := k.cloudNodeManager.Image(k8sVersion) - if err != nil { - return fmt.Errorf("defining Image for Node Manager: %w", err) - } - - cloudNodeManagerConfiguration := resources.NewDefaultCloudNodeManagerDeployment( - nodeManagerImage, k.cloudNodeManager.Path(), k.cloudNodeManager.ExtraArgs(), - ) - if err := k.clusterUtil.SetupCloudNodeManager(k.client, 
cloudNodeManagerConfiguration); err != nil { - return fmt.Errorf("setting up cloud-node-manager: %w", err) - } - - return nil -} - func (k *KubeWrapper) setupClusterAutoscaler(instance metadata.InstanceMetadata, cloudServiceAccountURI string, k8sVersion versions.ValidK8sVersion) error { if !k.clusterAutoscaler.Supported() { return nil diff --git a/bootstrapper/internal/kubernetes/kubernetes_test.go b/bootstrapper/internal/kubernetes/kubernetes_test.go index f176383ea..1492e3d25 100644 --- a/bootstrapper/internal/kubernetes/kubernetes_test.go +++ b/bootstrapper/internal/kubernetes/kubernetes_test.go @@ -49,7 +49,6 @@ func TestInitCluster(t *testing.T) { kubectl stubKubectl providerMetadata ProviderMetadata CloudControllerManager CloudControllerManager - CloudNodeManager CloudNodeManager ClusterAutoscaler ClusterAutoscaler kubeconfigReader configReader wantConfig k8sapi.KubeadmInitYAML @@ -63,7 +62,6 @@ func TestInitCluster(t *testing.T) { }, providerMetadata: &stubProviderMetadata{SupportedResp: false}, CloudControllerManager: &stubCloudControllerManager{}, - CloudNodeManager: &stubCloudNodeManager{SupportedResp: false}, ClusterAutoscaler: &stubClusterAutoscaler{}, wantConfig: k8sapi.KubeadmInitYAML{ InitConfiguration: kubeadm.InitConfiguration{ @@ -96,7 +94,6 @@ func TestInitCluster(t *testing.T) { SupportsLoadBalancerResp: true, }, CloudControllerManager: &stubCloudControllerManager{}, - CloudNodeManager: &stubCloudNodeManager{SupportedResp: false}, ClusterAutoscaler: &stubClusterAutoscaler{}, wantConfig: k8sapi.KubeadmInitYAML{ InitConfiguration: kubeadm.InitConfiguration{ @@ -128,7 +125,6 @@ func TestInitCluster(t *testing.T) { SupportedResp: true, }, CloudControllerManager: &stubCloudControllerManager{}, - CloudNodeManager: &stubCloudNodeManager{}, ClusterAutoscaler: &stubClusterAutoscaler{}, wantErr: true, k8sVersion: versions.Default, @@ -143,7 +139,6 @@ func TestInitCluster(t *testing.T) { SupportedResp: true, }, CloudControllerManager: 
&stubCloudControllerManager{}, - CloudNodeManager: &stubCloudNodeManager{}, ClusterAutoscaler: &stubClusterAutoscaler{}, wantErr: true, k8sVersion: versions.Default, @@ -159,7 +154,6 @@ func TestInitCluster(t *testing.T) { SupportedResp: true, }, CloudControllerManager: &stubCloudControllerManager{}, - CloudNodeManager: &stubCloudNodeManager{}, ClusterAutoscaler: &stubClusterAutoscaler{}, wantErr: true, k8sVersion: versions.Default, @@ -171,7 +165,6 @@ func TestInitCluster(t *testing.T) { }, providerMetadata: &stubProviderMetadata{}, CloudControllerManager: &stubCloudControllerManager{}, - CloudNodeManager: &stubCloudNodeManager{}, ClusterAutoscaler: &stubClusterAutoscaler{}, wantErr: true, k8sVersion: versions.Default, @@ -184,7 +177,6 @@ func TestInitCluster(t *testing.T) { }, providerMetadata: &stubProviderMetadata{}, CloudControllerManager: &stubCloudControllerManager{}, - CloudNodeManager: &stubCloudNodeManager{}, ClusterAutoscaler: &stubClusterAutoscaler{}, wantErr: true, k8sVersion: versions.Default, @@ -197,19 +189,18 @@ func TestInitCluster(t *testing.T) { }, providerMetadata: &stubProviderMetadata{}, CloudControllerManager: &stubCloudControllerManager{SupportedResp: true}, - CloudNodeManager: &stubCloudNodeManager{}, ClusterAutoscaler: &stubClusterAutoscaler{}, wantErr: true, k8sVersion: versions.Default, }, "kubeadm init fails when setting the cloud node manager": { - clusterUtil: stubClusterUtil{setupCloudNodeManagerError: someErr}, + clusterUtil: stubClusterUtil{}, + helmClient: stubHelmClient{servicesError: someErr}, kubeconfigReader: &stubKubeconfigReader{ Kubeconfig: []byte("someKubeconfig"), }, providerMetadata: &stubProviderMetadata{}, CloudControllerManager: &stubCloudControllerManager{}, - CloudNodeManager: &stubCloudNodeManager{SupportedResp: true}, ClusterAutoscaler: &stubClusterAutoscaler{}, wantErr: true, k8sVersion: versions.Default, @@ -221,7 +212,6 @@ func TestInitCluster(t *testing.T) { }, providerMetadata: &stubProviderMetadata{}, 
CloudControllerManager: &stubCloudControllerManager{}, - CloudNodeManager: &stubCloudNodeManager{}, ClusterAutoscaler: &stubClusterAutoscaler{SupportedResp: true}, wantErr: true, k8sVersion: versions.Default, @@ -233,7 +223,6 @@ func TestInitCluster(t *testing.T) { }, providerMetadata: &stubProviderMetadata{}, CloudControllerManager: &stubCloudControllerManager{}, - CloudNodeManager: &stubCloudNodeManager{}, ClusterAutoscaler: &stubClusterAutoscaler{}, wantErr: true, k8sVersion: versions.Default, @@ -245,7 +234,6 @@ func TestInitCluster(t *testing.T) { }, providerMetadata: &stubProviderMetadata{SupportedResp: false}, CloudControllerManager: &stubCloudControllerManager{}, - CloudNodeManager: &stubCloudNodeManager{SupportedResp: false}, ClusterAutoscaler: &stubClusterAutoscaler{}, wantErr: true, k8sVersion: versions.Default, @@ -257,7 +245,6 @@ func TestInitCluster(t *testing.T) { }, providerMetadata: &stubProviderMetadata{SupportedResp: false}, CloudControllerManager: &stubCloudControllerManager{}, - CloudNodeManager: &stubCloudNodeManager{SupportedResp: false}, ClusterAutoscaler: &stubClusterAutoscaler{}, wantErr: true, k8sVersion: versions.Default, @@ -269,7 +256,6 @@ func TestInitCluster(t *testing.T) { }, providerMetadata: &stubProviderMetadata{}, CloudControllerManager: &stubCloudControllerManager{}, - CloudNodeManager: &stubCloudNodeManager{}, ClusterAutoscaler: &stubClusterAutoscaler{}, k8sVersion: "1.19", wantErr: true, @@ -286,7 +272,6 @@ func TestInitCluster(t *testing.T) { helmClient: &tc.helmClient, providerMetadata: tc.providerMetadata, cloudControllerManager: tc.CloudControllerManager, - cloudNodeManager: tc.CloudNodeManager, clusterAutoscaler: tc.ClusterAutoscaler, configProvider: &stubConfigProvider{InitConfig: k8sapi.KubeadmInitYAML{}}, client: &tc.kubectl, @@ -503,20 +488,18 @@ func TestK8sCompliantHostname(t *testing.T) { } type stubClusterUtil struct { - installComponentsErr error - initClusterErr error - setupAutoscalingError error - 
setupCloudControllerManagerError error - setupCloudNodeManagerError error - setupKonnectivityError error - setupAccessManagerError error - setupVerificationServiceErr error - setupGCPGuestAgentErr error - setupOLMErr error - setupNMOErr error - setupNodeOperatorErr error - joinClusterErr error - startKubeletErr error + installComponentsErr error + initClusterErr error + setupAutoscalingError error + setupKonnectivityError error + setupAccessManagerError error + setupVerificationServiceErr error + setupGCPGuestAgentErr error + setupOLMErr error + setupNMOErr error + setupNodeOperatorErr error + joinClusterErr error + startKubeletErr error initConfigs [][]byte joinConfigs [][]byte @@ -543,18 +526,10 @@ func (s *stubClusterUtil) SetupGCPGuestAgent(kubectl k8sapi.Client, gcpGuestAgen return s.setupGCPGuestAgentErr } -func (s *stubClusterUtil) SetupCloudControllerManager(kubectl k8sapi.Client, cloudControllerManagerConfiguration kubernetes.Marshaler, configMaps kubernetes.Marshaler, secrets kubernetes.Marshaler) error { - return s.setupCloudControllerManagerError -} - func (s *stubClusterUtil) SetupAccessManager(kubectl k8sapi.Client, accessManagerConfiguration kubernetes.Marshaler) error { return s.setupAccessManagerError } -func (s *stubClusterUtil) SetupCloudNodeManager(kubectl k8sapi.Client, cloudNodeManagerConfiguration kubernetes.Marshaler) error { - return s.setupCloudNodeManagerError -} - func (s *stubClusterUtil) SetupVerificationService(kubectl k8sapi.Client, verificationServiceConfiguration kubernetes.Marshaler) error { return s.setupVerificationServiceErr } diff --git a/cli/internal/helm/charts/edgeless/constellation-services/Chart.yaml b/cli/internal/helm/charts/edgeless/constellation-services/Chart.yaml index 1eaefd701..45f43bb09 100644 --- a/cli/internal/helm/charts/edgeless/constellation-services/Chart.yaml +++ b/cli/internal/helm/charts/edgeless/constellation-services/Chart.yaml @@ -25,3 +25,7 @@ dependencies: - Azure - GCP - AWS + - name: cnm + 
version: 2.2.0-pre + tags: + - Azure diff --git a/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/.helmignore b/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/Chart.yaml b/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/Chart.yaml new file mode 100644 index 000000000..d260a801e --- /dev/null +++ b/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: cnm +description: A chart to deploy cloud node manager for constellation +type: application +version: 2.2.0-pre diff --git a/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/azure-daemonset.yaml b/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/azure-daemonset.yaml new file mode 100644 index 000000000..a2da0d2d0 --- /dev/null +++ b/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/azure-daemonset.yaml @@ -0,0 +1,57 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloud-node-manager + namespace: {{ .Release.Namespace}} + labels: + addonmanager.kubernetes.io/mode: Reconcile + component: cloud-node-manager + kubernetes.io/cluster-service: "true" +spec: + selector: + matchLabels: + k8s-app: cloud-node-manager + template: + metadata: + annotations: + 
cluster-autoscaler.kubernetes.io/daemonset-pod: "true" + labels: + k8s-app: cloud-node-manager + spec: + containers: + - name: cloud-node-manager + image: {{ .Values.image }} + imagePullPolicy: IfNotPresent + command: + - cloud-node-manager + - --node-name=$(NODE_NAME) + - --wait-routes=true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + requests: + cpu: 50m + memory: 50Mi + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + serviceAccountName: cloud-node-manager + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Equal + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + updateStrategy: {} diff --git a/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/clusterrole.yaml b/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/clusterrole.yaml new file mode 100644 index 000000000..886db4e42 --- /dev/null +++ b/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/clusterrole.yaml @@ -0,0 +1,25 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloud-node-manager + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: cloud-node-manager + kubernetes.io/cluster-service: "true" +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - watch + - list + - get + - update + - patch +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch diff --git a/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/clusterrolebinding.yaml b/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..9d6579b1e --- /dev/null +++ 
b/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloud-node-manager + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: cloud-node-manager + kubernetes.io/cluster-service: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cloud-node-manager +subjects: +- kind: ServiceAccount + name: cloud-node-manager + namespace: {{ .Release.Namespace}} diff --git a/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/serviceaccount.yaml b/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/serviceaccount.yaml new file mode 100644 index 000000000..9a53eeb0a --- /dev/null +++ b/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/templates/serviceaccount.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-node-manager + namespace: {{ .Release.Namespace}} + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: cloud-node-manager + kubernetes.io/cluster-service: "true" diff --git a/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/values.schema.json b/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/values.schema.json new file mode 100644 index 000000000..93d0b9dbb --- /dev/null +++ b/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/values.schema.json @@ -0,0 +1,15 @@ +{ + "$schema": "https://json-schema.org/draft-07/schema#", + "properties": { + "image": { + "description": "Container image to use for the spawned pods.", + "type": "string", + "examples": ["mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.23.21"] + } + }, + "required": [ + "image" + ], + "title": "Values", + "type": "object" +} diff --git a/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/values.yaml 
b/cli/internal/helm/charts/edgeless/constellation-services/charts/cnm/values.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/cli/internal/helm/loader.go b/cli/internal/helm/loader.go index 9f63e1dec..62765607c 100644 --- a/cli/internal/helm/loader.go +++ b/cli/internal/helm/loader.go @@ -38,15 +38,17 @@ type ChartLoader struct { joinServiceImage string kmsImage string ccmImage string + cnmImage string } func New(csp cloudprovider.Provider, k8sVersion versions.ValidK8sVersion) *ChartLoader { - var ccmImage string + var ccmImage, cnmImage string switch csp { case cloudprovider.AWS: ccmImage = versions.VersionConfigs[k8sVersion].CloudControllerManagerImageAWS case cloudprovider.Azure: ccmImage = versions.VersionConfigs[k8sVersion].CloudControllerManagerImageAzure + cnmImage = versions.VersionConfigs[k8sVersion].CloudNodeManagerImageAzure case cloudprovider.GCP: ccmImage = versions.VersionConfigs[k8sVersion].CloudControllerManagerImageGCP } @@ -55,6 +57,7 @@ func New(csp cloudprovider.Provider, k8sVersion versions.ValidK8sVersion) *Chart joinServiceImage: versions.JoinImage, kmsImage: versions.KmsImage, ccmImage: ccmImage, + cnmImage: cnmImage, } } @@ -156,7 +159,7 @@ func (i *ChartLoader) loadConstellationServices(csp cloudprovider.Provider, "image": i.joinServiceImage, "namespace": constants.ConstellationNamespace, }, - "ccm": map[string]interface{}{ + "ccm": map[string]any{ "csp": csp, }, } @@ -178,6 +181,10 @@ func (i *ChartLoader) loadConstellationServices(csp cloudprovider.Provider, "image": i.ccmImage, } + vals["cnm"] = map[string]any{ + "image": i.cnmImage, + } + vals["tags"] = map[string]any{ "Azure": true, } diff --git a/cli/internal/helm/loader_test.go b/cli/internal/helm/loader_test.go index a71ee304e..e4a16dd29 100644 --- a/cli/internal/helm/loader_test.go +++ b/cli/internal/helm/loader_test.go @@ -50,6 +50,7 @@ func TestTemplate(t *testing.T) { enforceIDKeyDigest bool valuesModifier func(map[string]any) error ccmImage string + cnmImage 
string }{ "GCP": { csp: cloudprovider.GCP, @@ -62,6 +63,7 @@ func TestTemplate(t *testing.T) { enforceIDKeyDigest: true, valuesModifier: prepareAzureValues, ccmImage: "ccmImageForAzure", + cnmImage: "cnmImageForAzure", }, "QEMU": { csp: cloudprovider.QEMU, @@ -75,7 +77,7 @@ func TestTemplate(t *testing.T) { assert := assert.New(t) require := require.New(t) - chartLoader := ChartLoader{joinServiceImage: "joinServiceImage", kmsImage: "kmsImage", ccmImage: tc.ccmImage} + chartLoader := ChartLoader{joinServiceImage: "joinServiceImage", kmsImage: "kmsImage", ccmImage: tc.ccmImage, cnmImage: tc.cnmImage} release, err := chartLoader.Load(tc.csp, true, []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), []uint32{1, 11}, tc.enforceIDKeyDigest) require.NoError(err) @@ -98,8 +100,13 @@ func TestTemplate(t *testing.T) { err = tc.valuesModifier(helmReleases.ConstellationServices.Values) require.NoError(err) + // This step is needed to enable/disable subcharts according to their tags/conditions. 
+ err = chartutil.ProcessDependencies(chart, helmReleases.ConstellationServices.Values) + require.NoError(err) + valuesToRender, err := chartutil.ToRenderValues(chart, helmReleases.ConstellationServices.Values, options, caps) require.NoError(err) + result, err := engine.Render(chart, valuesToRender) require.NoError(err) for k, v := range result { diff --git a/cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/azure-daemonset.yaml b/cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/azure-daemonset.yaml new file mode 100644 index 000000000..c3b9bb9ab --- /dev/null +++ b/cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/azure-daemonset.yaml @@ -0,0 +1,57 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloud-node-manager + namespace: testNamespace + labels: + addonmanager.kubernetes.io/mode: Reconcile + component: cloud-node-manager + kubernetes.io/cluster-service: "true" +spec: + selector: + matchLabels: + k8s-app: cloud-node-manager + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/daemonset-pod: "true" + labels: + k8s-app: cloud-node-manager + spec: + containers: + - name: cloud-node-manager + image: cnmImageForAzure + imagePullPolicy: IfNotPresent + command: + - cloud-node-manager + - --node-name=$(NODE_NAME) + - --wait-routes=true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + requests: + cpu: 50m + memory: 50Mi + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + serviceAccountName: cloud-node-manager + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Equal + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + updateStrategy: {} diff 
--git a/cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/clusterrole.yaml b/cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/clusterrole.yaml new file mode 100644 index 000000000..886db4e42 --- /dev/null +++ b/cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/clusterrole.yaml @@ -0,0 +1,25 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cloud-node-manager + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: cloud-node-manager + kubernetes.io/cluster-service: "true" +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - watch + - list + - get + - update + - patch +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch diff --git a/cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/clusterrolebinding.yaml b/cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..0dae2174e --- /dev/null +++ b/cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cloud-node-manager + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: cloud-node-manager + kubernetes.io/cluster-service: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cloud-node-manager +subjects: +- kind: ServiceAccount + name: cloud-node-manager + namespace: testNamespace diff --git a/cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/serviceaccount.yaml b/cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/serviceaccount.yaml new file mode 100644 index 000000000..bdb88c944 --- /dev/null +++ b/cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/serviceaccount.yaml @@ -0,0 +1,9 @@ 
+apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-node-manager + namespace: testNamespace + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: cloud-node-manager + kubernetes.io/cluster-service: "true"