Mirror of https://github.com/edgelesssys/constellation.git (synced 2025-01-11 15:39:33 -05:00)

AB#2582: deploy CNM via Helm (#423)

parent 4b257616e4
commit e363f03240
@@ -104,11 +104,10 @@ func main() {
log.With(zap.Error(err)).Fatalf("Failed to set up cloud logger")
}

nodeManager := &awscloud.CloudNodeManager{}
cloudControllerManager := &awscloud.CloudControllerManager{}
clusterInitJoiner = kubernetes.New(
"aws", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.New(), cloudControllerManager,
nodeManager, &gcpcloud.Autoscaler{}, metadata, pcrsJSON, helmClient,
&gcpcloud.Autoscaler{}, metadata, pcrsJSON, helmClient,
)
openTPM = vtpm.OpenVTPM
fs = afero.NewOsFs()
@@ -145,7 +144,7 @@ func main() {
}
clusterInitJoiner = kubernetes.New(
"gcp", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.New(), cloudControllerManager,
&gcpcloud.CloudNodeManager{}, &gcpcloud.Autoscaler{}, metadata, pcrsJSON, helmClient,
&gcpcloud.Autoscaler{}, metadata, pcrsJSON, helmClient,
)
openTPM = vtpm.OpenVTPM
fs = afero.NewOsFs()
@@ -179,7 +178,7 @@ func main() {
}
clusterInitJoiner = kubernetes.New(
"azure", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.New(), azurecloud.NewCloudControllerManager(metadata),
&azurecloud.CloudNodeManager{}, &azurecloud.Autoscaler{}, metadata, pcrsJSON, helmClient,
&azurecloud.Autoscaler{}, metadata, pcrsJSON, helmClient,
)

openTPM = vtpm.OpenVTPM
@@ -201,7 +200,7 @@ func main() {
}
clusterInitJoiner = kubernetes.New(
"qemu", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.New(), &qemucloud.CloudControllerManager{},
&qemucloud.CloudNodeManager{}, &qemucloud.Autoscaler{}, metadata, pcrsJSON, helmClient,
&qemucloud.Autoscaler{}, metadata, pcrsJSON, helmClient,
)
metadataAPI = metadata
@@ -58,8 +58,6 @@ func (h *Client) InstallConstellationServices(ctx context.Context, release helm.
h.ReleaseName = release.ReleaseName
h.Wait = release.Wait
h.Timeout = timeout
// update dependencies - unsure if necessary for local deps.
h.DependencyUpdate = true

mergedVals := mergeMaps(release.Values, extraVals)
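mergeMaps itself is not part of this hunk; a recursive merge of Helm value maps typically looks like the sketch below (name and behavior assumed here, with values from the second argument winning on conflicts), not necessarily the repository's exact implementation:

// Sketch only: recursively merges b over a and returns a new map.
// Nested map[string]any values are merged; any other value in b replaces
// the corresponding value in a.
func mergeMaps(a, b map[string]any) map[string]any {
	out := make(map[string]any, len(a))
	for k, v := range a {
		out[k] = v
	}
	for k, v := range b {
		if bMap, ok := v.(map[string]any); ok {
			if aMap, ok := out[k].(map[string]any); ok {
				out[k] = mergeMaps(aMap, bMap)
				continue
			}
		}
		out[k] = v
	}
	return out
}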
@@ -187,30 +187,6 @@ func (m *stubCloudControllerManager) Supported() bool {
return m.SupportedResp
}

type stubCloudNodeManager struct {
SupportedResp bool

ImageResp string
PathResp string
ExtraArgsResp []string
}

func (m *stubCloudNodeManager) Image(k8sVersion versions.ValidK8sVersion) (string, error) {
return m.ImageResp, nil
}

func (m *stubCloudNodeManager) Path() string {
return m.PathResp
}

func (m *stubCloudNodeManager) ExtraArgs() []string {
return m.ExtraArgsResp
}

func (m *stubCloudNodeManager) Supported() bool {
return m.SupportedResp
}

type stubClusterAutoscaler struct {
SupportedResp bool
}
@@ -328,25 +328,6 @@ func (k *KubernetesUtil) SetupGCPGuestAgent(kubectl Client, guestAgentDaemonset
return kubectl.Apply(guestAgentDaemonset, true)
}

// SetupCloudControllerManager deploys the k8s cloud-controller-manager.
func (k *KubernetesUtil) SetupCloudControllerManager(kubectl Client, cloudControllerManagerConfiguration kubernetes.Marshaler, configMaps kubernetes.Marshaler, secrets kubernetes.Marshaler) error {
if err := kubectl.Apply(configMaps, true); err != nil {
return fmt.Errorf("applying ccm ConfigMaps: %w", err)
}
if err := kubectl.Apply(secrets, true); err != nil {
return fmt.Errorf("applying ccm Secrets: %w", err)
}
if err := kubectl.Apply(cloudControllerManagerConfiguration, true); err != nil {
return fmt.Errorf("applying ccm: %w", err)
}
return nil
}

// SetupCloudNodeManager deploys the k8s cloud-node-manager.
func (k *KubernetesUtil) SetupCloudNodeManager(kubectl Client, cloudNodeManagerConfiguration kubernetes.Marshaler) error {
return kubectl.Apply(cloudNodeManagerConfiguration, true)
}

// SetupAccessManager deploys the constellation-access-manager for deploying SSH keys on control-plane & worker nodes.
func (k *KubernetesUtil) SetupAccessManager(kubectl Client, accessManagerConfiguration kubernetes.Marshaler) error {
return kubectl.Apply(accessManagerConfiguration, true)
@@ -1,187 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package resources

import (
"github.com/edgelesssys/constellation/v2/internal/kubernetes"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/resource"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type CloudNodeManagerDeployment struct {
ServiceAccount k8s.ServiceAccount
ClusterRole rbac.ClusterRole
ClusterRoleBinding rbac.ClusterRoleBinding
DaemonSet apps.DaemonSet
}

// NewDefaultCloudNodeManagerDeployment creates a new *cloudNodeManagerDeployment, customized for the CSP.
func NewDefaultCloudNodeManagerDeployment(image, path string, extraArgs []string) *CloudNodeManagerDeployment {
command := []string{
path,
"--node-name=$(NODE_NAME)",
}
command = append(command, extraArgs...)
return &CloudNodeManagerDeployment{
ServiceAccount: k8s.ServiceAccount{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: meta.ObjectMeta{
Name: "cloud-node-manager",
Namespace: "kube-system",
Labels: map[string]string{
"k8s-app": "cloud-node-manager",
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
},
ClusterRole: rbac.ClusterRole{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: meta.ObjectMeta{
Name: "cloud-node-manager",
Labels: map[string]string{
"k8s-app": "cloud-node-manager",
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"watch", "list", "get", "update", "patch"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes/status"},
Verbs: []string{"patch"},
},
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: meta.ObjectMeta{
Name: "cloud-node-manager",
Labels: map[string]string{
"k8s-app": "cloud-node-manager",
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "cloud-node-manager",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "cloud-node-manager",
Namespace: "kube-system",
},
},
},
DaemonSet: apps.DaemonSet{
TypeMeta: meta.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: meta.ObjectMeta{
Name: "cloud-node-manager",
Namespace: "kube-system",
Labels: map[string]string{
"component": "cloud-node-manager",
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
Spec: apps.DaemonSetSpec{
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{"k8s-app": "cloud-node-manager"},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{"k8s-app": "cloud-node-manager"},
Annotations: map[string]string{"cluster-autoscaler.kubernetes.io/daemonset-pod": "true"},
},
Spec: k8s.PodSpec{
PriorityClassName: "system-node-critical",
ServiceAccountName: "cloud-node-manager",
HostNetwork: true,
NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
Tolerations: []k8s.Toleration{
{
Key: "CriticalAddonsOnly",
Operator: k8s.TolerationOpExists,
},
{
Key: "node-role.kubernetes.io/master",
Operator: k8s.TolerationOpEqual,
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoExecute,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
},
Containers: []k8s.Container{
{
Name: "cloud-node-manager",
Image: image,
ImagePullPolicy: k8s.PullIfNotPresent,
Command: command,
Env: []k8s.EnvVar{
{
Name: "NODE_NAME",
ValueFrom: &k8s.EnvVarSource{
FieldRef: &k8s.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
Resources: k8s.ResourceRequirements{
Requests: k8s.ResourceList{
k8s.ResourceCPU: resource.MustParse("50m"),
k8s.ResourceMemory: resource.MustParse("50Mi"),
},
},
},
},
},
},
},
},
}
}

// Marshal marshals the cloud-node-manager deployment as YAML documents.
func (c *CloudNodeManagerDeployment) Marshal() ([]byte, error) {
return kubernetes.MarshalK8SResources(c)
}
@@ -1,28 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package resources

import (
"testing"

"github.com/edgelesssys/constellation/v2/internal/kubernetes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestCloudNodeManagerMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)

cloudNodeManagerDepl := NewDefaultCloudNodeManagerDeployment("image", "path", []string{})
data, err := cloudNodeManagerDepl.Marshal()
require.NoError(err)

var recreated CloudNodeManagerDeployment
require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
assert.Equal(cloudNodeManagerDepl, &recreated)
}
@@ -24,8 +24,6 @@ type clusterUtil interface {
JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, log *logger.Logger) error
SetupAccessManager(kubectl k8sapi.Client, sshUsers kubernetes.Marshaler) error
SetupAutoscaling(kubectl k8sapi.Client, clusterAutoscalerConfiguration kubernetes.Marshaler, secrets kubernetes.Marshaler) error
SetupCloudControllerManager(kubectl k8sapi.Client, cloudControllerManagerConfiguration kubernetes.Marshaler, configMaps kubernetes.Marshaler, secrets kubernetes.Marshaler) error
SetupCloudNodeManager(kubectl k8sapi.Client, cloudNodeManagerConfiguration kubernetes.Marshaler) error
SetupKonnectivity(kubectl k8sapi.Client, konnectivityAgentsDaemonSet kubernetes.Marshaler) error
SetupVerificationService(kubectl k8sapi.Client, verificationServiceConfiguration kubernetes.Marshaler) error
SetupGCPGuestAgent(kubectl k8sapi.Client, gcpGuestAgentConfiguration kubernetes.Marshaler) error
@@ -54,7 +54,6 @@ type KubeWrapper struct {
client k8sapi.Client
kubeconfigReader configReader
cloudControllerManager CloudControllerManager
cloudNodeManager CloudNodeManager
clusterAutoscaler ClusterAutoscaler
providerMetadata ProviderMetadata
initialMeasurementsJSON []byte
@@ -63,7 +62,7 @@ type KubeWrapper struct {

// New creates a new KubeWrapper with real values.
func New(cloudProvider string, clusterUtil clusterUtil, configProvider configurationProvider, client k8sapi.Client, cloudControllerManager CloudControllerManager,
cloudNodeManager CloudNodeManager, clusterAutoscaler ClusterAutoscaler, providerMetadata ProviderMetadata, initialMeasurementsJSON []byte, helmClient helmClient,
clusterAutoscaler ClusterAutoscaler, providerMetadata ProviderMetadata, initialMeasurementsJSON []byte, helmClient helmClient,
) *KubeWrapper {
return &KubeWrapper{
cloudProvider: cloudProvider,
@@ -73,7 +72,6 @@ func New(cloudProvider string, clusterUtil clusterUtil, configProvider configura
client: client,
kubeconfigReader: &KubeconfigReader{fs: afero.Afero{Fs: afero.NewOsFs()}},
cloudControllerManager: cloudControllerManager,
cloudNodeManager: cloudNodeManager,
clusterAutoscaler: clusterAutoscaler,
providerMetadata: providerMetadata,
initialMeasurementsJSON: initialMeasurementsJSON,
@@ -210,10 +208,6 @@ func (k *KubeWrapper) InitCluster(
return nil, fmt.Errorf("failed to setup internal ConfigMap: %w", err)
}

if err := k.setupCloudNodeManager(k8sVersion); err != nil {
return nil, fmt.Errorf("setting up cloud node manager: %w", err)
}

if err := k.setupClusterAutoscaler(instance, cloudServiceAccountURI, k8sVersion); err != nil {
return nil, fmt.Errorf("setting up cluster autoscaler: %w", err)
}
@@ -322,25 +316,6 @@ func (k *KubeWrapper) GetKubeconfig() ([]byte, error) {
return k.kubeconfigReader.ReadKubeconfig()
}

func (k *KubeWrapper) setupCloudNodeManager(k8sVersion versions.ValidK8sVersion) error {
if !k.cloudNodeManager.Supported() {
return nil
}
nodeManagerImage, err := k.cloudNodeManager.Image(k8sVersion)
if err != nil {
return fmt.Errorf("defining Image for Node Manager: %w", err)
}

cloudNodeManagerConfiguration := resources.NewDefaultCloudNodeManagerDeployment(
nodeManagerImage, k.cloudNodeManager.Path(), k.cloudNodeManager.ExtraArgs(),
)
if err := k.clusterUtil.SetupCloudNodeManager(k.client, cloudNodeManagerConfiguration); err != nil {
return fmt.Errorf("setting up cloud-node-manager: %w", err)
}

return nil
}

func (k *KubeWrapper) setupClusterAutoscaler(instance metadata.InstanceMetadata, cloudServiceAccountURI string, k8sVersion versions.ValidK8sVersion) error {
if !k.clusterAutoscaler.Supported() {
return nil
@@ -49,7 +49,6 @@ func TestInitCluster(t *testing.T) {
kubectl stubKubectl
providerMetadata ProviderMetadata
CloudControllerManager CloudControllerManager
CloudNodeManager CloudNodeManager
ClusterAutoscaler ClusterAutoscaler
kubeconfigReader configReader
wantConfig k8sapi.KubeadmInitYAML
@@ -63,7 +62,6 @@ func TestInitCluster(t *testing.T) {
},
providerMetadata: &stubProviderMetadata{SupportedResp: false},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{SupportedResp: false},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantConfig: k8sapi.KubeadmInitYAML{
InitConfiguration: kubeadm.InitConfiguration{
@@ -96,7 +94,6 @@ func TestInitCluster(t *testing.T) {
SupportsLoadBalancerResp: true,
},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{SupportedResp: false},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantConfig: k8sapi.KubeadmInitYAML{
InitConfiguration: kubeadm.InitConfiguration{
@@ -128,7 +125,6 @@ func TestInitCluster(t *testing.T) {
SupportedResp: true,
},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
k8sVersion: versions.Default,
@@ -143,7 +139,6 @@ func TestInitCluster(t *testing.T) {
SupportedResp: true,
},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
k8sVersion: versions.Default,
@@ -159,7 +154,6 @@ func TestInitCluster(t *testing.T) {
SupportedResp: true,
},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
k8sVersion: versions.Default,
@@ -171,7 +165,6 @@ func TestInitCluster(t *testing.T) {
},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
k8sVersion: versions.Default,
@@ -184,7 +177,6 @@ func TestInitCluster(t *testing.T) {
},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
k8sVersion: versions.Default,
@@ -197,19 +189,18 @@ func TestInitCluster(t *testing.T) {
},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{SupportedResp: true},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
k8sVersion: versions.Default,
},
"kubeadm init fails when setting the cloud node manager": {
clusterUtil: stubClusterUtil{setupCloudNodeManagerError: someErr},
clusterUtil: stubClusterUtil{},
helmClient: stubHelmClient{servicesError: someErr},
kubeconfigReader: &stubKubeconfigReader{
Kubeconfig: []byte("someKubeconfig"),
},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{SupportedResp: true},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
k8sVersion: versions.Default,
@@ -221,7 +212,6 @@ func TestInitCluster(t *testing.T) {
},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{SupportedResp: true},
wantErr: true,
k8sVersion: versions.Default,
@@ -233,7 +223,6 @@ func TestInitCluster(t *testing.T) {
},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
k8sVersion: versions.Default,
@@ -245,7 +234,6 @@ func TestInitCluster(t *testing.T) {
},
providerMetadata: &stubProviderMetadata{SupportedResp: false},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{SupportedResp: false},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
k8sVersion: versions.Default,
@@ -257,7 +245,6 @@ func TestInitCluster(t *testing.T) {
},
providerMetadata: &stubProviderMetadata{SupportedResp: false},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{SupportedResp: false},
ClusterAutoscaler: &stubClusterAutoscaler{},
wantErr: true,
k8sVersion: versions.Default,
@@ -269,7 +256,6 @@ func TestInitCluster(t *testing.T) {
},
providerMetadata: &stubProviderMetadata{},
CloudControllerManager: &stubCloudControllerManager{},
CloudNodeManager: &stubCloudNodeManager{},
ClusterAutoscaler: &stubClusterAutoscaler{},
k8sVersion: "1.19",
wantErr: true,
@@ -286,7 +272,6 @@ func TestInitCluster(t *testing.T) {
helmClient: &tc.helmClient,
providerMetadata: tc.providerMetadata,
cloudControllerManager: tc.CloudControllerManager,
cloudNodeManager: tc.CloudNodeManager,
clusterAutoscaler: tc.ClusterAutoscaler,
configProvider: &stubConfigProvider{InitConfig: k8sapi.KubeadmInitYAML{}},
client: &tc.kubectl,
@@ -506,8 +491,6 @@ type stubClusterUtil struct {
installComponentsErr error
initClusterErr error
setupAutoscalingError error
setupCloudControllerManagerError error
setupCloudNodeManagerError error
setupKonnectivityError error
setupAccessManagerError error
setupVerificationServiceErr error
@@ -543,18 +526,10 @@ func (s *stubClusterUtil) SetupGCPGuestAgent(kubectl k8sapi.Client, gcpGuestAgen
return s.setupGCPGuestAgentErr
}

func (s *stubClusterUtil) SetupCloudControllerManager(kubectl k8sapi.Client, cloudControllerManagerConfiguration kubernetes.Marshaler, configMaps kubernetes.Marshaler, secrets kubernetes.Marshaler) error {
return s.setupCloudControllerManagerError
}

func (s *stubClusterUtil) SetupAccessManager(kubectl k8sapi.Client, accessManagerConfiguration kubernetes.Marshaler) error {
return s.setupAccessManagerError
}

func (s *stubClusterUtil) SetupCloudNodeManager(kubectl k8sapi.Client, cloudNodeManagerConfiguration kubernetes.Marshaler) error {
return s.setupCloudNodeManagerError
}

func (s *stubClusterUtil) SetupVerificationService(kubectl k8sapi.Client, verificationServiceConfiguration kubernetes.Marshaler) error {
return s.setupVerificationServiceErr
}
@@ -25,3 +25,7 @@ dependencies:
      - Azure
      - GCP
      - AWS
  - name: cnm
    version: 2.2.0-pre
    tags:
      - Azure
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -0,0 +1,5 @@
apiVersion: v2
name: cnm
description: A chart to deploy cloud node manager for constellation
type: application
version: 2.2.0-pre
@@ -0,0 +1,57 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cloud-node-manager
  namespace: {{ .Release.Namespace}}
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    component: cloud-node-manager
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    matchLabels:
      k8s-app: cloud-node-manager
  template:
    metadata:
      annotations:
        cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
      labels:
        k8s-app: cloud-node-manager
    spec:
      containers:
      - name: cloud-node-manager
        image: {{ .Values.image }}
        imagePullPolicy: IfNotPresent
        command:
        - cloud-node-manager
        - --node-name=$(NODE_NAME)
        - --wait-routes=true
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        resources:
          requests:
            cpu: 50m
            memory: 50Mi
      hostNetwork: true
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-node-critical
      serviceAccountName: cloud-node-manager
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Equal
        value: "true"
      - effect: NoSchedule
        key: node-role.kubernetes.io/control-plane
        operator: Exists
      - effect: NoExecute
        operator: Exists
      - effect: NoSchedule
        operator: Exists
  updateStrategy: {}
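The new chart can be rendered on its own to inspect the generated manifests; the sketch below uses Helm's Go SDK the same way the TestTemplate changes further down in this diff do. The chart path is hypothetical, and the image value is the example from values.schema.json:

// Sketch: render the cnm chart standalone (path and values are illustrative).
package main

import (
	"fmt"
	"log"

	"helm.sh/helm/v3/pkg/chart/loader"
	"helm.sh/helm/v3/pkg/chartutil"
	"helm.sh/helm/v3/pkg/engine"
)

func main() {
	cnmChart, err := loader.Load("./charts/cnm") // hypothetical path to the subchart
	if err != nil {
		log.Fatal(err)
	}

	// Values as required by the chart's values.schema.json.
	vals := map[string]interface{}{
		"image": "mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.23.21",
	}
	opts := chartutil.ReleaseOptions{Name: "testRelease", Namespace: "testNamespace", IsInstall: true}

	renderVals, err := chartutil.ToRenderValues(cnmChart, vals, opts, chartutil.DefaultCapabilities)
	if err != nil {
		log.Fatal(err)
	}

	manifests, err := engine.Render(cnmChart, renderVals)
	if err != nil {
		log.Fatal(err)
	}
	for name, manifest := range manifests {
		// Expect the DaemonSet, ClusterRole, ClusterRoleBinding, and ServiceAccount.
		fmt.Printf("--- %s\n%s\n", name, manifest)
	}
}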
@@ -0,0 +1,25 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cloud-node-manager
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: cloud-node-manager
    kubernetes.io/cluster-service: "true"
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - watch
  - list
  - get
  - update
  - patch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cloud-node-manager
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: cloud-node-manager
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cloud-node-manager
subjects:
- kind: ServiceAccount
  name: cloud-node-manager
  namespace: {{ .Release.Namespace}}
@@ -0,0 +1,9 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cloud-node-manager
  namespace: {{ .Release.Namespace}}
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: cloud-node-manager
    kubernetes.io/cluster-service: "true"
@@ -0,0 +1,15 @@
{
  "$schema": "https://json-schema.org/draft-07/schema#",
  "properties": {
    "image": {
      "description": "Container image to use for the spawned pods.",
      "type": "string",
      "examples": ["mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.23.21"]
    }
  },
  "required": [
    "image"
  ],
  "title": "Values",
  "type": "object"
}
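Helm evaluates values.schema.json automatically on install, upgrade, lint, and template. The same check can also be run directly via the SDK; a small sketch, reusing the cnmChart loaded in the rendering example above:

// Passing an empty values map should fail, since "image" is required by the schema.
if err := chartutil.ValidateAgainstSchema(cnmChart, map[string]interface{}{}); err != nil {
	log.Printf("schema rejected the values as expected: %v", err)
}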
@@ -38,15 +38,17 @@ type ChartLoader struct {
joinServiceImage string
kmsImage string
ccmImage string
cnmImage string
}

func New(csp cloudprovider.Provider, k8sVersion versions.ValidK8sVersion) *ChartLoader {
var ccmImage string
var ccmImage, cnmImage string
switch csp {
case cloudprovider.AWS:
ccmImage = versions.VersionConfigs[k8sVersion].CloudControllerManagerImageAWS
case cloudprovider.Azure:
ccmImage = versions.VersionConfigs[k8sVersion].CloudControllerManagerImageAzure
cnmImage = versions.VersionConfigs[k8sVersion].CloudNodeManagerImageAzure
case cloudprovider.GCP:
ccmImage = versions.VersionConfigs[k8sVersion].CloudControllerManagerImageGCP
}
@@ -55,6 +57,7 @@ func New(csp cloudprovider.Provider, k8sVersion versions.ValidK8sVersion) *Chart
joinServiceImage: versions.JoinImage,
kmsImage: versions.KmsImage,
ccmImage: ccmImage,
cnmImage: cnmImage,
}
}

@@ -156,7 +159,7 @@ func (i *ChartLoader) loadConstellationServices(csp cloudprovider.Provider,
"image": i.joinServiceImage,
"namespace": constants.ConstellationNamespace,
},
"ccm": map[string]interface{}{
"ccm": map[string]any{
"csp": csp,
},
}
@@ -178,6 +181,10 @@ func (i *ChartLoader) loadConstellationServices(csp cloudprovider.Provider,
"image": i.ccmImage,
}

vals["cnm"] = map[string]any{
"image": i.cnmImage,
}

vals["tags"] = map[string]any{
"Azure": true,
}
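These two entries are what wire the new subchart up: Helm nests values under the dependency's name, so the "cnm" map surfaces inside the subchart as .Values.image, while tags.Azure satisfies the Azure tag added to the parent Chart.yaml above. A minimal sketch of the resulting shape (the image string is the example from values.schema.json, not a value this code hard-codes):

vals := map[string]any{
	"cnm": map[string]any{
		// rendered as {{ .Values.image }} inside the cnm subchart
		"image": "mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.23.21",
	},
	"tags": map[string]any{
		// enables the dependency declared with the Azure tag in Chart.yaml
		"Azure": true,
	},
}
_ = vals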
@@ -50,6 +50,7 @@ func TestTemplate(t *testing.T) {
enforceIDKeyDigest bool
valuesModifier func(map[string]any) error
ccmImage string
cnmImage string
}{
"GCP": {
csp: cloudprovider.GCP,
@@ -62,6 +63,7 @@ func TestTemplate(t *testing.T) {
enforceIDKeyDigest: true,
valuesModifier: prepareAzureValues,
ccmImage: "ccmImageForAzure",
cnmImage: "cnmImageForAzure",
},
"QEMU": {
csp: cloudprovider.QEMU,
@@ -75,7 +77,7 @@ func TestTemplate(t *testing.T) {
assert := assert.New(t)
require := require.New(t)

chartLoader := ChartLoader{joinServiceImage: "joinServiceImage", kmsImage: "kmsImage", ccmImage: tc.ccmImage}
chartLoader := ChartLoader{joinServiceImage: "joinServiceImage", kmsImage: "kmsImage", ccmImage: tc.ccmImage, cnmImage: tc.cnmImage}
release, err := chartLoader.Load(tc.csp, true, []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), []uint32{1, 11}, tc.enforceIDKeyDigest)
require.NoError(err)

@@ -98,8 +100,13 @@ func TestTemplate(t *testing.T) {
err = tc.valuesModifier(helmReleases.ConstellationServices.Values)
require.NoError(err)

// This step is needed to enable/disable subcharts according to their tags/conditions.
err = chartutil.ProcessDependencies(chart, helmReleases.ConstellationServices.Values)
require.NoError(err)

valuesToRender, err := chartutil.ToRenderValues(chart, helmReleases.ConstellationServices.Values, options, caps)
require.NoError(err)

result, err := engine.Render(chart, valuesToRender)
require.NoError(err)
for k, v := range result {
@@ -0,0 +1,57 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cloud-node-manager
  namespace: testNamespace
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    component: cloud-node-manager
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    matchLabels:
      k8s-app: cloud-node-manager
  template:
    metadata:
      annotations:
        cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
      labels:
        k8s-app: cloud-node-manager
    spec:
      containers:
      - name: cloud-node-manager
        image: cnmImageForAzure
        imagePullPolicy: IfNotPresent
        command:
        - cloud-node-manager
        - --node-name=$(NODE_NAME)
        - --wait-routes=true
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        resources:
          requests:
            cpu: 50m
            memory: 50Mi
      hostNetwork: true
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-node-critical
      serviceAccountName: cloud-node-manager
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Equal
        value: "true"
      - effect: NoSchedule
        key: node-role.kubernetes.io/control-plane
        operator: Exists
      - effect: NoExecute
        operator: Exists
      - effect: NoSchedule
        operator: Exists
  updateStrategy: {}
cli/internal/helm/testdata/Azure/constellation-services/charts/cnm/templates/clusterrole.yaml (new file, 25 lines, vendored)
@@ -0,0 +1,25 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cloud-node-manager
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: cloud-node-manager
    kubernetes.io/cluster-service: "true"
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - watch
  - list
  - get
  - update
  - patch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cloud-node-manager
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: cloud-node-manager
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cloud-node-manager
subjects:
- kind: ServiceAccount
  name: cloud-node-manager
  namespace: testNamespace
@@ -0,0 +1,9 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cloud-node-manager
  namespace: testNamespace
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: cloud-node-manager
    kubernetes.io/cluster-service: "true"