/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package kubernetes

import (
	"context"
	"errors"
	"net"
	"strconv"
	"testing"

	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/k8sapi"
	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/kubewaiter"
	"github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
	"github.com/edgelesssys/constellation/v2/internal/constants"
	"github.com/edgelesssys/constellation/v2/internal/deploy/helm"
	"github.com/edgelesssys/constellation/v2/internal/kubernetes"
	"github.com/edgelesssys/constellation/v2/internal/logger"
	"github.com/edgelesssys/constellation/v2/internal/role"
	"github.com/edgelesssys/constellation/v2/internal/versions"
	"github.com/edgelesssys/constellation/v2/internal/versions/components"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/goleak"
	corev1 "k8s.io/api/core/v1"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)
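
// TestMain runs the package's tests and verifies that none of them leak goroutines.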
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}
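
// TestInitCluster exercises KubeWrapper.InitCluster against stubbed cluster
// utilities, Helm client, kubectl client, and provider metadata, checking the
// kubeadm init configuration it produces and its error handling.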
func TestInitCluster(t *testing.T) {
	someErr := errors.New("failed")
	serviceAccountURI := "some-service-account-uri"

	nodeName := "node-name"
	providerID := "provider-id"
	privateIP := "192.0.2.1"
	loadbalancerIP := "192.0.2.3"
	aliasIPRange := "192.0.2.0/24"

	testCases := map[string]struct {
		clusterUtil      stubClusterUtil
		helmClient       stubHelmClient
		kubectl          stubKubectl
		kubeAPIWaiter    stubKubeAPIWaiter
		providerMetadata ProviderMetadata
		wantConfig       k8sapi.KubeadmInitYAML
		wantErr          bool
		k8sVersion       versions.ValidK8sVersion
	}{
		"kubeadm init works with metadata and loadbalancer": {
			clusterUtil:   stubClusterUtil{kubeconfig: []byte("someKubeconfig")},
			kubeAPIWaiter: stubKubeAPIWaiter{},
			providerMetadata: &stubProviderMetadata{
				selfResp: metadata.InstanceMetadata{
					Name:          nodeName,
					ProviderID:    providerID,
					VPCIP:         privateIP,
					AliasIPRanges: []string{aliasIPRange},
				},
				getLoadBalancerEndpointResp: loadbalancerIP,
			},
			wantConfig: k8sapi.KubeadmInitYAML{
				InitConfiguration: kubeadm.InitConfiguration{
					NodeRegistration: kubeadm.NodeRegistrationOptions{
						KubeletExtraArgs: map[string]string{
							"node-ip":     privateIP,
							"provider-id": providerID,
						},
						Name: nodeName,
					},
				},
				ClusterConfiguration: kubeadm.ClusterConfiguration{
					ClusterName:          "kubernetes",
					ControlPlaneEndpoint: loadbalancerIP,
					APIServer: kubeadm.APIServer{
						CertSANs: []string{privateIP},
					},
				},
			},
			wantErr:    false,
			k8sVersion: versions.Default,
		},
		"kubeadm init fails when annotating itself": {
			clusterUtil:   stubClusterUtil{kubeconfig: []byte("someKubeconfig")},
			kubeAPIWaiter: stubKubeAPIWaiter{},
			providerMetadata: &stubProviderMetadata{
				selfResp: metadata.InstanceMetadata{
					Name:          nodeName,
					ProviderID:    providerID,
					VPCIP:         privateIP,
					AliasIPRanges: []string{aliasIPRange},
				},
				getLoadBalancerEndpointResp: loadbalancerIP,
			},
			kubectl:    stubKubectl{annotateNodeErr: someErr},
			wantErr:    true,
			k8sVersion: versions.Default,
		},
		"kubeadm init fails when retrieving metadata self": {
			clusterUtil:   stubClusterUtil{kubeconfig: []byte("someKubeconfig")},
			kubeAPIWaiter: stubKubeAPIWaiter{},
			providerMetadata: &stubProviderMetadata{
				selfErr: someErr,
			},
			wantErr:    true,
			k8sVersion: versions.Default,
		},
		"kubeadm init fails when retrieving metadata loadbalancer ip": {
			clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")},
			providerMetadata: &stubProviderMetadata{
				getLoadBalancerEndpointErr: someErr,
			},
			wantErr:    true,
			k8sVersion: versions.Default,
		},
		"kubeadm init fails when applying the init config": {
			clusterUtil: stubClusterUtil{
				initClusterErr: someErr,
				kubeconfig:     []byte("someKubeconfig"),
			},
			kubeAPIWaiter:    stubKubeAPIWaiter{},
			providerMetadata: &stubProviderMetadata{},
			wantErr:          true,
			k8sVersion:       versions.Default,
		},
		"kubeadm init fails when deploying cilium": {
			clusterUtil:      stubClusterUtil{kubeconfig: []byte("someKubeconfig")},
			helmClient:       stubHelmClient{ciliumError: someErr},
			providerMetadata: &stubProviderMetadata{},
			wantErr:          true,
			k8sVersion:       versions.Default,
		},
		"kubeadm init fails when setting up constellation-services chart": {
			clusterUtil:      stubClusterUtil{kubeconfig: []byte("someKubeconfig")},
			helmClient:       stubHelmClient{servicesError: someErr},
			kubeAPIWaiter:    stubKubeAPIWaiter{},
			providerMetadata: &stubProviderMetadata{},
			wantErr:          true,
			k8sVersion:       versions.Default,
		},
		"kubeadm init fails when setting the cloud node manager": {
			clusterUtil:      stubClusterUtil{kubeconfig: []byte("someKubeconfig")},
			helmClient:       stubHelmClient{servicesError: someErr},
			kubeAPIWaiter:    stubKubeAPIWaiter{},
			providerMetadata: &stubProviderMetadata{},
			wantErr:          true,
			k8sVersion:       versions.Default,
		},
		"kubeadm init fails when setting the cluster autoscaler": {
			clusterUtil:      stubClusterUtil{kubeconfig: []byte("someKubeconfig")},
			helmClient:       stubHelmClient{servicesError: someErr},
			kubeAPIWaiter:    stubKubeAPIWaiter{},
			providerMetadata: &stubProviderMetadata{},
			wantErr:          true,
			k8sVersion:       versions.Default,
		},
		"kubeadm init fails when reading kubeconfig": {
			clusterUtil:      stubClusterUtil{kubeconfig: []byte("someKubeconfig")},
			kubeAPIWaiter:    stubKubeAPIWaiter{},
			providerMetadata: &stubProviderMetadata{},
			wantErr:          true,
			k8sVersion:       versions.Default,
		},
		"kubeadm init fails when setting up konnectivity": {
			clusterUtil:      stubClusterUtil{kubeconfig: []byte("someKubeconfig")},
			helmClient:       stubHelmClient{servicesError: someErr},
			kubeAPIWaiter:    stubKubeAPIWaiter{},
			providerMetadata: &stubProviderMetadata{},
			wantErr:          true,
			k8sVersion:       versions.Default,
		},
		"kubeadm init fails when setting up verification service": {
			clusterUtil:      stubClusterUtil{kubeconfig: []byte("someKubeconfig")},
			kubeAPIWaiter:    stubKubeAPIWaiter{},
			providerMetadata: &stubProviderMetadata{},
			wantErr:          true,
			k8sVersion:       versions.Default,
		},
		"kubeadm init fails when waiting for kubeAPI server": {
			clusterUtil:      stubClusterUtil{kubeconfig: []byte("someKubeconfig")},
			kubeAPIWaiter:    stubKubeAPIWaiter{waitErr: someErr},
			providerMetadata: &stubProviderMetadata{},
			k8sVersion:       versions.Default,
			wantErr:          true,
		},
		"unsupported k8sVersion fails cluster creation": {
			clusterUtil:      stubClusterUtil{kubeconfig: []byte("someKubeconfig")},
			kubeAPIWaiter:    stubKubeAPIWaiter{},
			providerMetadata: &stubProviderMetadata{},
			k8sVersion:       "1.19",
			wantErr:          true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			kube := KubeWrapper{
				clusterUtil:      &tc.clusterUtil,
				helmClient:       &tc.helmClient,
				providerMetadata: tc.providerMetadata,
				kubeAPIWaiter:    &tc.kubeAPIWaiter,
				configProvider:   &stubConfigProvider{initConfig: k8sapi.KubeadmInitYAML{}},
				client:           &tc.kubectl,
				getIPAddr:        func() (string, error) { return privateIP, nil },
			}

			_, err := kube.InitCluster(
				context.Background(), serviceAccountURI, string(tc.k8sVersion), "kubernetes",
				nil, nil, false, true, []byte("{}"), false, nil, logger.NewTest(t),
			)

			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)

			var kubeadmConfig k8sapi.KubeadmInitYAML
			require.NoError(kubernetes.UnmarshalK8SResources(tc.clusterUtil.initConfigs[0], &kubeadmConfig))
			require.Equal(tc.wantConfig.ClusterConfiguration, kubeadmConfig.ClusterConfiguration)
			require.Equal(tc.wantConfig.InitConfiguration, kubeadmConfig.InitConfiguration)
		})
	}
}
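
// TestJoinCluster exercises KubeWrapper.JoinCluster for worker and
// control-plane roles, checking the kubeadm join configuration that is
// applied and the error handling around metadata and component installation.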
func TestJoinCluster(t *testing.T) {
	someErr := errors.New("failed")
	joinCommand := &kubeadm.BootstrapTokenDiscovery{
		APIServerEndpoint: "192.0.2.0:" + strconv.Itoa(constants.KubernetesPort),
		Token:             "kube-fake-token",
		CACertHashes:      []string{"sha256:a60ebe9b0879090edd83b40a4df4bebb20506bac1e51d518ff8f4505a721930f"},
	}

	privateIP := "192.0.2.1"
	k8sVersion := versions.Default

	k8sComponents := components.Components{
		{
			URL:         "URL",
			Hash:        "Hash",
			InstallPath: "InstallPath",
			Extract:     true,
		},
	}

	testCases := map[string]struct {
		clusterUtil      stubClusterUtil
		providerMetadata ProviderMetadata
		wantConfig       kubeadm.JoinConfiguration
		role             role.Role
		k8sComponents    components.Components
		wantErr          bool
	}{
		"kubeadm join worker works with metadata and remote Kubernetes Components": {
			clusterUtil: stubClusterUtil{},
			providerMetadata: &stubProviderMetadata{
				selfResp: metadata.InstanceMetadata{
					ProviderID: "provider-id",
					Name:       "metadata-name",
					VPCIP:      "192.0.2.1",
				},
			},
			k8sComponents: k8sComponents,
			role:          role.Worker,
			wantConfig: kubeadm.JoinConfiguration{
				Discovery: kubeadm.Discovery{
					BootstrapToken: joinCommand,
				},
				NodeRegistration: kubeadm.NodeRegistrationOptions{
					Name:             "metadata-name",
					KubeletExtraArgs: map[string]string{"node-ip": "192.0.2.1"},
				},
			},
		},
		"kubeadm join worker works with metadata and local Kubernetes components": {
			clusterUtil: stubClusterUtil{},
			providerMetadata: &stubProviderMetadata{
				selfResp: metadata.InstanceMetadata{
					ProviderID: "provider-id",
					Name:       "metadata-name",
					VPCIP:      "192.0.2.1",
				},
			},
			role: role.Worker,
			wantConfig: kubeadm.JoinConfiguration{
				Discovery: kubeadm.Discovery{
					BootstrapToken: joinCommand,
				},
				NodeRegistration: kubeadm.NodeRegistrationOptions{
					Name:             "metadata-name",
					KubeletExtraArgs: map[string]string{"node-ip": "192.0.2.1"},
				},
			},
		},
		"kubeadm join worker works with metadata and cloud controller manager": {
			clusterUtil: stubClusterUtil{},
			providerMetadata: &stubProviderMetadata{
				selfResp: metadata.InstanceMetadata{
					ProviderID: "provider-id",
					Name:       "metadata-name",
					VPCIP:      "192.0.2.1",
				},
			},
			role: role.Worker,
			wantConfig: kubeadm.JoinConfiguration{
				Discovery: kubeadm.Discovery{
					BootstrapToken: joinCommand,
				},
				NodeRegistration: kubeadm.NodeRegistrationOptions{
					Name:             "metadata-name",
					KubeletExtraArgs: map[string]string{"node-ip": "192.0.2.1"},
				},
			},
		},
		"kubeadm join control-plane node works with metadata": {
			clusterUtil: stubClusterUtil{},
			providerMetadata: &stubProviderMetadata{
				selfResp: metadata.InstanceMetadata{
					ProviderID: "provider-id",
					Name:       "metadata-name",
					VPCIP:      "192.0.2.1",
				},
			},
			role: role.ControlPlane,
			wantConfig: kubeadm.JoinConfiguration{
				Discovery: kubeadm.Discovery{
					BootstrapToken: joinCommand,
				},
				NodeRegistration: kubeadm.NodeRegistrationOptions{
					Name:             "metadata-name",
					KubeletExtraArgs: map[string]string{"node-ip": "192.0.2.1"},
				},
				ControlPlane: &kubeadm.JoinControlPlane{
					LocalAPIEndpoint: kubeadm.APIEndpoint{
						AdvertiseAddress: "192.0.2.1",
						BindPort:         constants.KubernetesPort,
					},
				},
				SkipPhases: []string{"control-plane-prepare/download-certs"},
			},
		},
		"kubeadm join worker fails when installing remote Kubernetes components": {
			clusterUtil: stubClusterUtil{installComponentsErr: errors.New("error")},
			providerMetadata: &stubProviderMetadata{
				selfResp: metadata.InstanceMetadata{
					ProviderID: "provider-id",
					Name:       "metadata-name",
					VPCIP:      "192.0.2.1",
				},
			},
			k8sComponents: k8sComponents,
			role:          role.Worker,
			wantErr:       true,
		},
		"kubeadm join worker fails when retrieving self metadata": {
			clusterUtil: stubClusterUtil{},
			providerMetadata: &stubProviderMetadata{
				selfErr: someErr,
			},
			role:    role.Worker,
			wantErr: true,
		},
		"kubeadm join worker fails when applying the join config": {
			clusterUtil:      stubClusterUtil{joinClusterErr: someErr},
			providerMetadata: &stubProviderMetadata{},
			role:             role.Worker,
			wantErr:          true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			kube := KubeWrapper{
				clusterUtil:      &tc.clusterUtil,
				providerMetadata: tc.providerMetadata,
				configProvider:   &stubConfigProvider{},
				getIPAddr:        func() (string, error) { return privateIP, nil },
			}

			err := kube.JoinCluster(context.Background(), joinCommand, tc.role, string(k8sVersion), tc.k8sComponents, logger.NewTest(t))
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)

			var joinYaml k8sapi.KubeadmJoinYAML
			require.NoError(kubernetes.UnmarshalK8SResources(tc.clusterUtil.joinConfigs[0], &joinYaml))

			assert.Equal(tc.wantConfig, joinYaml.JoinConfiguration)
		})
	}
}
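
// TestK8sCompliantHostname verifies that hostnames are lowercased and
// underscores replaced with dashes, and that inputs that cannot be turned
// into a compliant node name are rejected with an error.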
func TestK8sCompliantHostname(t *testing.T) {
	testCases := map[string]struct {
		input    string
		expected string
		wantErr  bool
	}{
		"no change": {
			input:    "test",
			expected: "test",
		},
		"uppercase": {
			input:    "TEST",
			expected: "test",
		},
		"underscore": {
			input:    "test_node",
			expected: "test-node",
		},
		"empty": {
			input:    "",
			expected: "",
			wantErr:  true,
		},
		"error": {
			input:    "test_node_",
			expected: "",
			wantErr:  true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			actual, err := k8sCompliantHostname(tc.input)
			if tc.wantErr {
				assert.Error(err)
				return
			}
			assert.NoError(err)
			assert.Equal(tc.expected, actual)
		})
	}
}
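
// stubClusterUtil stubs the cluster setup operations used by KubeWrapper.
// Each method returns its preconfigured error, and the init and join configs
// it receives are recorded so tests can inspect them afterwards.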
type stubClusterUtil struct {
	installComponentsErr   error
	initClusterErr         error
	setupAutoscalingError  error
	setupKonnectivityError error
	setupGCPGuestAgentErr  error
	setupOLMErr            error
	setupNMOErr            error
	setupNodeOperatorErr   error
	joinClusterErr         error
	startKubeletErr        error

	kubeconfig []byte

	initConfigs [][]byte
	joinConfigs [][]byte
}

func (s *stubClusterUtil) SetupKonnectivity(kubectl k8sapi.Client, konnectivityAgentsDaemonSet kubernetes.Marshaler) error {
	return s.setupKonnectivityError
}

func (s *stubClusterUtil) InstallComponents(ctx context.Context, kubernetesComponents components.Components) error {
	return s.installComponentsErr
}

func (s *stubClusterUtil) InitCluster(ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, controlPlaneEndpoint string, conformanceMode bool, log *logger.Logger) ([]byte, error) {
	s.initConfigs = append(s.initConfigs, initConfig)
	return s.kubeconfig, s.initClusterErr
}

func (s *stubClusterUtil) SetupAutoscaling(kubectl k8sapi.Client, clusterAutoscalerConfiguration kubernetes.Marshaler, secrets kubernetes.Marshaler) error {
	return s.setupAutoscalingError
}

func (s *stubClusterUtil) SetupGCPGuestAgent(kubectl k8sapi.Client, gcpGuestAgentConfiguration kubernetes.Marshaler) error {
	return s.setupGCPGuestAgentErr
}

func (s *stubClusterUtil) SetupOperatorLifecycleManager(ctx context.Context, kubectl k8sapi.Client, olmCRDs, olmConfiguration kubernetes.Marshaler, crdNames []string) error {
	return s.setupOLMErr
}

func (s *stubClusterUtil) SetupNodeMaintenanceOperator(kubectl k8sapi.Client, nodeMaintenanceOperatorConfiguration kubernetes.Marshaler) error {
	return s.setupNMOErr
}

func (s *stubClusterUtil) SetupNodeOperator(ctx context.Context, kubectl k8sapi.Client, nodeOperatorConfiguration kubernetes.Marshaler) error {
	return s.setupNodeOperatorErr
}

func (s *stubClusterUtil) JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, log *logger.Logger) error {
	s.joinConfigs = append(s.joinConfigs, joinConfig)
	return s.joinClusterErr
}

func (s *stubClusterUtil) StartKubelet() error {
	return s.startKubeletErr
}

func (s *stubClusterUtil) FixCilium(log *logger.Logger) {
}
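
// stubConfigProvider provides canned kubeadm init and join configurations.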
type stubConfigProvider struct {
	initConfig k8sapi.KubeadmInitYAML
	joinConfig k8sapi.KubeadmJoinYAML
}

func (s *stubConfigProvider) InitConfiguration(_ bool, _ string) k8sapi.KubeadmInitYAML {
	return s.initConfig
}

func (s *stubConfigProvider) JoinConfiguration(_ bool) k8sapi.KubeadmJoinYAML {
	s.joinConfig = k8sapi.KubeadmJoinYAML{
		JoinConfiguration: kubeadm.JoinConfiguration{
			Discovery: kubeadm.Discovery{
				BootstrapToken: &kubeadm.BootstrapTokenDiscovery{},
			},
		},
	}
	return s.joinConfig
}
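
// stubKubectl stubs the Kubernetes client used by KubeWrapper, returning
// preconfigured errors and a preconfigured namespace list.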
type stubKubectl struct {
	createConfigMapErr              error
	addTolerationsToDeploymentErr   error
	addNodeSelectorsToDeploymentErr error
	waitForCRDsErr                  error
	listAllNamespacesErr            error
	annotateNodeErr                 error

	listAllNamespacesResp *corev1.NamespaceList
}

func (s *stubKubectl) Initialize(kubeconfig []byte) error {
	return nil
}

func (s *stubKubectl) CreateConfigMap(ctx context.Context, configMap corev1.ConfigMap) error {
	return s.createConfigMapErr
}

func (s *stubKubectl) AddTolerationsToDeployment(ctx context.Context, tolerations []corev1.Toleration, name string, namespace string) error {
	return s.addTolerationsToDeploymentErr
}

func (s *stubKubectl) AddNodeSelectorsToDeployment(ctx context.Context, selectors map[string]string, name string, namespace string) error {
	return s.addNodeSelectorsToDeploymentErr
}

func (s *stubKubectl) AnnotateNode(ctx context.Context, nodeName, annotationKey, annotationValue string) error {
	return s.annotateNodeErr
}

func (s *stubKubectl) WaitForCRDs(ctx context.Context, crds []string) error {
	return s.waitForCRDsErr
}

func (s *stubKubectl) ListAllNamespaces(ctx context.Context) (*corev1.NamespaceList, error) {
	return s.listAllNamespacesResp, s.listAllNamespacesErr
}
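
// stubHelmClient stubs Helm chart installations, failing each one with its
// preconfigured error.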
type stubHelmClient struct {
	ciliumError      error
	certManagerError error
	operatorsError   error
	servicesError    error
}

func (s *stubHelmClient) InstallCilium(ctx context.Context, kubectl k8sapi.Client, release helm.Release, in k8sapi.SetupPodNetworkInput) error {
	return s.ciliumError
}

func (s *stubHelmClient) InstallCertManager(ctx context.Context, release helm.Release) error {
	return s.certManagerError
}

func (s *stubHelmClient) InstallOperators(ctx context.Context, release helm.Release, extraVals map[string]any) error {
	return s.operatorsError
}

func (s *stubHelmClient) InstallConstellationServices(ctx context.Context, release helm.Release, extraVals map[string]any) error {
	return s.servicesError
}
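
// stubKubeAPIWaiter stubs waiting for the Kubernetes API server to become
// reachable.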
type stubKubeAPIWaiter struct {
	waitErr error
}

func (s *stubKubeAPIWaiter) Wait(_ context.Context, _ kubewaiter.KubernetesClient) error {
	return s.waitErr
}