join: deprecate components migration fallback (#833)

commit 0297aed1ea
parent 16b002ec1d
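In short: the bootstrapper's InstallComponentsFromCLI is renamed to InstallComponents, the old version-based InstallComponents(ctx, version) variant that looked up a built-in component list is deleted, and the join service no longer creates a fallback components ConfigMap when the reference written at cluster initialization is missing. The Go sketch below only illustrates the consolidated install path; Component, installer, and printInstaller are simplified stand-ins, not the repository's versions.ComponentVersions or installer types.

package main

import (
    "context"
    "fmt"
)

// Component is a simplified stand-in for one entry of versions.ComponentVersions.
type Component struct {
    URL  string
    Hash string
}

// installer abstracts the download-and-install step (k.inst in the real code).
type installer interface {
    Install(ctx context.Context, c Component) error
}

type KubernetesUtil struct {
    inst installer
}

// InstallComponents installs exactly the components handed to it; there is no
// fallback to a built-in per-version component list anymore.
func (k *KubernetesUtil) InstallComponents(ctx context.Context, components []Component) error {
    for _, c := range components {
        if err := k.inst.Install(ctx, c); err != nil {
            return fmt.Errorf("installing kubernetes component from URL %s: %w", c.URL, err)
        }
    }
    return nil
}

// printInstaller is a toy installer that only logs what it would fetch.
type printInstaller struct{}

func (printInstaller) Install(_ context.Context, c Component) error {
    fmt.Println("would install", c.URL)
    return nil
}

func main() {
    k := &KubernetesUtil{inst: printInstaller{}}
    _ = k.InstallComponents(context.Background(), []Component{
        {URL: "https://example.com/kubelet", Hash: "sha256:deadbeef"},
    })
}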
@@ -75,8 +75,8 @@ func NewKubernetesUtil() *KubernetesUtil {
     }
 }
 
-// InstallComponentsFromCLI installs the kubernetes components passed from the CLI.
-func (k *KubernetesUtil) InstallComponentsFromCLI(ctx context.Context, kubernetesComponents versions.ComponentVersions) error {
+// InstallComponents installs the kubernetes components passed from the CLI.
+func (k *KubernetesUtil) InstallComponents(ctx context.Context, kubernetesComponents versions.ComponentVersions) error {
     for _, component := range kubernetesComponents {
         if err := k.inst.Install(ctx, component); err != nil {
             return fmt.Errorf("installing kubernetes component from URL %s: %w", component.URL, err)
@@ -86,20 +86,6 @@ func (k *KubernetesUtil) InstallComponentsFromCLI(ctx context.Context, kubernete
     return enableSystemdUnit(ctx, kubeletServicePath)
 }
 
-// InstallComponents installs kubernetes components in the version specified.
-// TODO(AB#2543,3u13r): Remove this function once the JoinService is extended.
-func (k *KubernetesUtil) InstallComponents(ctx context.Context, version versions.ValidK8sVersion) error {
-    versionConf := versions.VersionConfigs[version]
-
-    for _, component := range versionConf.KubernetesComponents {
-        if err := k.inst.Install(ctx, component); err != nil {
-            return fmt.Errorf("installing kubernetes component from URL %s: %w", component.URL, err)
-        }
-    }
-
-    return enableSystemdUnit(ctx, kubeletServicePath)
-}
-
 // InitCluster instruments kubeadm to initialize the K8s cluster.
 func (k *KubernetesUtil) InitCluster(
     ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, conformanceMode bool, log *logger.Logger,
@@ -18,8 +18,7 @@ import (
 )
 
 type clusterUtil interface {
-    InstallComponents(ctx context.Context, version versions.ValidK8sVersion) error
-    InstallComponentsFromCLI(ctx context.Context, kubernetesComponents versions.ComponentVersions) error
+    InstallComponents(ctx context.Context, kubernetesComponents versions.ComponentVersions) error
     InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, conformanceMode bool, log *logger.Logger) error
     JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, log *logger.Logger) error
     FixCilium(log *logger.Logger)
@@ -97,7 +97,7 @@ func (k *KubeWrapper) InitCluster(
         return nil, err
     }
     log.With(zap.String("version", string(k8sVersion))).Infof("Installing Kubernetes components")
-    if err := k.clusterUtil.InstallComponentsFromCLI(ctx, kubernetesComponents); err != nil {
+    if err := k.clusterUtil.InstallComponents(ctx, kubernetesComponents); err != nil {
         return nil, err
     }
 
@@ -260,22 +260,10 @@ func (k *KubeWrapper) InitCluster(
 
 // JoinCluster joins existing Kubernetes cluster.
 func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, peerRole role.Role, versionString string, k8sComponents versions.ComponentVersions, log *logger.Logger) error {
-    k8sVersion, err := versions.NewValidK8sVersion(versionString)
-    if err != nil {
-        return err
-    }
-
-    if len(k8sComponents) != 0 {
-        log.With("k8sComponents", k8sComponents).Infof("Using provided kubernetes components")
-        if err := k.clusterUtil.InstallComponentsFromCLI(ctx, k8sComponents); err != nil {
-            return fmt.Errorf("installing kubernetes components: %w", err)
-        }
-    } else {
-        log.With(zap.String("version", string(k8sVersion))).Infof("Installing Kubernetes components")
-        if err := k.clusterUtil.InstallComponents(ctx, k8sVersion); err != nil {
-            return fmt.Errorf("installing kubernetes components: %w", err)
-        }
-    }
+    log.With("k8sComponents", k8sComponents).Infof("Installing provided kubernetes components")
+    if err := k.clusterUtil.InstallComponents(ctx, k8sComponents); err != nil {
+        return fmt.Errorf("installing kubernetes components: %w", err)
+    }
 
     // Step 1: retrieve cloud metadata for Kubernetes configuration
     log.Infof("Retrieving node metadata")
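As an aside, a minimal sketch of the new JoinCluster prologue shown in the hunk above: whatever components the join service hands over are installed unconditionally, with no len(k8sComponents) != 0 branch and no per-version fallback. The component type and componentInstaller interface below are hypothetical stand-ins for versions.ComponentVersions and the clusterUtil dependency, not the repository's types.

package main

import (
    "context"
    "errors"
    "fmt"
)

// component is a simplified stand-in for one entry of versions.ComponentVersions.
type component struct{ URL string }

// componentInstaller is a stand-in for the clusterUtil dependency.
type componentInstaller interface {
    InstallComponents(ctx context.Context, components []component) error
}

// joinPrologue mirrors the shape of the new code path: install whatever the
// join service provided, wrap the error, no fallback branch.
func joinPrologue(ctx context.Context, util componentInstaller, k8sComponents []component) error {
    if err := util.InstallComponents(ctx, k8sComponents); err != nil {
        return fmt.Errorf("installing kubernetes components: %w", err)
    }
    return nil
}

// failingInstaller simulates a failed component download.
type failingInstaller struct{}

func (failingInstaller) InstallComponents(context.Context, []component) error {
    return errors.New("download failed")
}

func main() {
    err := joinPrologue(context.Background(), failingInstaller{}, []component{{URL: "https://example.com/kubelet"}})
    fmt.Println(err) // installing kubernetes components: download failed
}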
@@ -299,7 +299,6 @@ func TestJoinCluster(t *testing.T) {
         wantConfig kubeadm.JoinConfiguration
         role role.Role
         k8sComponents versions.ComponentVersions
-        wantComponentsFromCLI bool
         wantErr bool
     }{
         "kubeadm join worker works with metadata and remote Kubernetes Components": {
@@ -313,7 +312,6 @@ func TestJoinCluster(t *testing.T) {
             },
             k8sComponents: k8sComponents,
             role: role.Worker,
-            wantComponentsFromCLI: true,
             wantConfig: kubeadm.JoinConfiguration{
                 Discovery: kubeadm.Discovery{
                     BootstrapToken: joinCommand,
@@ -392,7 +390,7 @@ func TestJoinCluster(t *testing.T) {
             },
         },
         "kubeadm join worker fails when installing remote Kubernetes components": {
-            clusterUtil: stubClusterUtil{installComponentsFromCLIErr: errors.New("error")},
+            clusterUtil: stubClusterUtil{installComponentsErr: errors.New("error")},
             providerMetadata: &stubProviderMetadata{
                 selfResp: metadata.InstanceMetadata{
                     ProviderID: "provider-id",
@@ -402,20 +400,6 @@ func TestJoinCluster(t *testing.T) {
             },
             k8sComponents: k8sComponents,
             role: role.Worker,
-            wantComponentsFromCLI: true,
-            wantErr: true,
-        },
-        "kubeadm join worker fails when installing local Kubernetes components": {
-            clusterUtil: stubClusterUtil{installComponentsErr: errors.New("error")},
-            providerMetadata: &stubProviderMetadata{
-                selfResp: metadata.InstanceMetadata{
-                    ProviderID: "provider-id",
-                    Name: "metadata-name",
-                    VPCIP: "192.0.2.1",
-                },
-            },
-            role: role.Worker,
-            wantComponentsFromCLI: true,
             wantErr: true,
         },
         "kubeadm join worker fails when retrieving self metadata": {
@@ -457,8 +441,6 @@ func TestJoinCluster(t *testing.T) {
             require.NoError(kubernetes.UnmarshalK8SResources(tc.clusterUtil.joinConfigs[0], &joinYaml))
 
             assert.Equal(tc.wantConfig, joinYaml.JoinConfiguration)
-            assert.Equal(tc.wantComponentsFromCLI, tc.clusterUtil.calledInstallComponentsFromCLI)
-            assert.Equal(!tc.wantComponentsFromCLI, tc.clusterUtil.calledInstallComponents)
         })
     }
 }
@@ -510,7 +492,6 @@ func TestK8sCompliantHostname(t *testing.T) {
 
 type stubClusterUtil struct {
     installComponentsErr error
-    installComponentsFromCLIErr error
     initClusterErr error
     setupAutoscalingError error
     setupKonnectivityError error
@@ -521,9 +502,6 @@ type stubClusterUtil struct {
     joinClusterErr error
     startKubeletErr error
 
-    calledInstallComponents bool
-    calledInstallComponentsFromCLI bool
-
     initConfigs [][]byte
     joinConfigs [][]byte
 }
@@ -532,16 +510,10 @@ func (s *stubClusterUtil) SetupKonnectivity(kubectl k8sapi.Client, konnectivityA
     return s.setupKonnectivityError
 }
 
-func (s *stubClusterUtil) InstallComponents(ctx context.Context, version versions.ValidK8sVersion) error {
-    s.calledInstallComponents = true
+func (s *stubClusterUtil) InstallComponents(ctx context.Context, kubernetesComponents versions.ComponentVersions) error {
     return s.installComponentsErr
 }
 
-func (s *stubClusterUtil) InstallComponentsFromCLI(ctx context.Context, kubernetesComponents versions.ComponentVersions) error {
-    s.calledInstallComponentsFromCLI = true
-    return s.installComponentsFromCLIErr
-}
-
 func (s *stubClusterUtil) InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, conformanceMode bool, log *logger.Logger) error {
     s.initConfigs = append(s.initConfigs, initConfig)
     return s.initClusterErr
@@ -28,8 +28,6 @@ rules:
   - configmaps
   verbs:
   - get
-  - create
-  - update
 - apiGroups:
   - "update.edgeless.systems"
   resources:
@@ -28,8 +28,6 @@ rules:
   - configmaps
   verbs:
   - get
-  - create
-  - update
 - apiGroups:
   - "update.edgeless.systems"
   resources:
@@ -28,8 +28,6 @@ rules:
   - configmaps
   verbs:
   - get
-  - create
-  - update
 - apiGroups:
   - "update.edgeless.systems"
   resources:
@@ -28,8 +28,6 @@ rules:
   - configmaps
   verbs:
   - get
-  - create
-  - update
 - apiGroups:
   - "update.edgeless.systems"
   resources:
@@ -16,7 +16,6 @@ import (
 
     "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/edgelesssys/constellation/v2/internal/versions"
-    corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/runtime/schema"
@@ -74,15 +73,6 @@ func (c *Client) getConfigMapData(ctx context.Context, name, key string) (string
     return cm.Data[key], nil
 }
 
-// CreateConfigMap creates the provided configmap.
-func (c *Client) CreateConfigMap(ctx context.Context, configMap corev1.ConfigMap) error {
-    _, err := c.client.CoreV1().ConfigMaps(configMap.ObjectMeta.Namespace).Create(ctx, &configMap, metav1.CreateOptions{})
-    if err != nil {
-        return fmt.Errorf("failed to create configmap: %w", err)
-    }
-    return nil
-}
-
 // AddNodeToJoiningNodes adds the provided node as a joining node CRD.
 func (c *Client) AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string, isControlPlane bool) error {
     joiningNode := &unstructured.Unstructured{}
@@ -138,20 +128,6 @@ func (c *Client) addWorkerToJoiningNodes(ctx context.Context, joiningNode *unstr
     return nil
 }
 
-// AddReferenceToK8sVersionConfigMap adds a reference to the provided configmap to the k8s version configmap.
-func (c *Client) AddReferenceToK8sVersionConfigMap(ctx context.Context, k8sVersionsConfigMapName string, componentsConfigMapName string) error {
-    cm, err := c.client.CoreV1().ConfigMaps("kube-system").Get(ctx, k8sVersionsConfigMapName, metav1.GetOptions{})
-    if err != nil {
-        return fmt.Errorf("failed to get configmap: %w", err)
-    }
-    cm.Data[constants.K8sComponentsFieldName] = componentsConfigMapName
-    _, err = c.client.CoreV1().ConfigMaps("kube-system").Update(ctx, cm, metav1.UpdateOptions{})
-    if err != nil {
-        return fmt.Errorf("failed to update configmap: %w", err)
-    }
-    return nil
-}
-
 var validHostnameRegex = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
 
 // k8sCompliantHostname transforms a hostname to an RFC 1123 compliant, lowercase subdomain as required by Kubernetes node names.
@@ -8,16 +8,11 @@ package server
 
 import (
     "context"
-    "encoding/json"
-    "errors"
     "fmt"
-    "io/fs"
     "net"
     "path/filepath"
-    "strings"
     "time"
 
-    "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
     "github.com/edgelesssys/constellation/v2/internal/attestation"
     "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/edgelesssys/constellation/v2/internal/crypto"
@@ -32,8 +27,6 @@ import (
     "google.golang.org/grpc/codes"
     "google.golang.org/grpc/credentials"
     "google.golang.org/grpc/status"
-    corev1 "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
     kubeadmv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 )
@@ -129,18 +122,7 @@ func (s *Server) IssueJoinTicket(ctx context.Context, req *joinproto.IssueJoinTi
 
     log.Infof("Querying K8sVersion ConfigMap for components ConfigMap name")
     componentsConfigMapName, err := s.getK8sComponentsConfigMapName()
-    if errors.Is(err, fs.ErrNotExist) {
-        // If the file does not exist, the Constellation was initialized with a version before 2.3.0
-        // As a migration step, the join service will create the ConfigMap with the K8s components which
-        // match the K8s minor version of the cluster.
-        log.Warnf("Reference to K8sVersion ConfigMap does not exist, creating fallback Components ConfigMap and referencing it in K8sVersion ConfigMap")
-        log.Warnf("This is expected if the Constellation was initialized with a CLI before version 2.3.0")
-        log.Warnf("DEPRECATION WARNING: This is a migration step and will be removed in a future release")
-        componentsConfigMapName, err = s.createFallbackComponentsConfigMap(ctx, k8sVersion)
-        if err != nil {
-            return nil, status.Errorf(codes.Internal, "unable to create fallback k8s components configmap: %s", err)
-        }
-    } else if err != nil {
+    if err != nil {
         return nil, status.Errorf(codes.Internal, "unable to get components ConfigMap name: %s", err)
     }
 
@@ -244,49 +226,6 @@ func (s *Server) getK8sComponentsConfigMapName() (string, error) {
     return componentsConfigMapName, nil
 }
 
-// This function mimics the creation of the components ConfigMap which is now done in the bootstrapper
-// during the first initialization of the Constellation .
-// For more information see setupK8sVersionConfigMap() in bootstrapper/internal/kubernetes/kubernetes.go.
-// This is a migration step and will be removed in a future release.
-func (s *Server) createFallbackComponentsConfigMap(ctx context.Context, k8sVersion string) (string, error) {
-    validK8sVersion, err := versions.NewValidK8sVersion(k8sVersion)
-    if err != nil {
-        return "", fmt.Errorf("could not create fallback components config map: %w", err)
-    }
-    components := versions.VersionConfigs[validK8sVersion].KubernetesComponents
-    componentsMarshalled, err := json.Marshal(components)
-    if err != nil {
-        return "", fmt.Errorf("marshalling component versions: %w", err)
-    }
-    componentsHash := components.GetHash()
-    componentConfigMapName := fmt.Sprintf("k8s-component-%s", strings.ReplaceAll(componentsHash, ":", "-"))
-
-    componentsConfig := corev1.ConfigMap{
-        TypeMeta: metav1.TypeMeta{
-            APIVersion: "v1",
-            Kind: "ConfigMap",
-        },
-        Immutable: to.Ptr(true),
-        ObjectMeta: metav1.ObjectMeta{
-            Name: componentConfigMapName,
-            Namespace: "kube-system",
-        },
-        Data: map[string]string{
-            constants.K8sComponentsFieldName: string(componentsMarshalled),
-        },
-    }
-
-    if err := s.kubeClient.CreateConfigMap(ctx, componentsConfig); err != nil {
-        return "", fmt.Errorf("creating fallback components config map: %w", err)
-    }
-
-    if err := s.kubeClient.AddReferenceToK8sVersionConfigMap(ctx, "k8s-version", componentConfigMapName); err != nil {
-        return "", fmt.Errorf("adding reference to fallback components config map: %w", err)
-    }
-
-    return componentConfigMapName, nil
-}
-
 // joinTokenGetter returns Kubernetes bootstrap (join) tokens.
 type joinTokenGetter interface {
     // GetJoinToken returns a bootstrap (join) token.
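To make the removed fallback concrete, here is a hedged sketch of the lookup's new behavior: when the file referencing the components ConfigMap does not exist (a cluster initialized with a CLI before v2.3.0), the join service now returns an error instead of synthesizing a ConfigMap from the built-in component list. The file path, function name, and error wording below are illustrative assumptions, not the service's actual values.

package main

import (
    "errors"
    "fmt"
    "io/fs"
    "os"
)

// getComponentsConfigMapName reads the name of the components ConfigMap from a
// file written by the bootstrapper at cluster initialization (path is a stand-in).
func getComponentsConfigMapName(path string) (string, error) {
    name, err := os.ReadFile(path)
    if errors.Is(err, fs.ErrNotExist) {
        // Old behavior: build a fallback ConfigMap from the built-in component list
        // and reference it in the k8s-version ConfigMap. New behavior: error out.
        return "", fmt.Errorf("components ConfigMap reference not found; cluster was likely initialized with a CLI before v2.3.0: %w", err)
    }
    if err != nil {
        return "", fmt.Errorf("reading components ConfigMap reference: %w", err)
    }
    return string(name), nil
}

func main() {
    if _, err := getComponentsConfigMapName("/does/not/exist"); err != nil {
        fmt.Println(err)
    }
}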
@@ -309,7 +248,5 @@ type certificateAuthority interface {
 
 type kubeClient interface {
     GetComponents(ctx context.Context, configMapName string) (versions.ComponentVersions, error)
-    CreateConfigMap(ctx context.Context, configMap corev1.ConfigMap) error
     AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string, isControlPlane bool) error
-    AddReferenceToK8sVersionConfigMap(ctx context.Context, k8sVersionsConfigMapName string, componentsConfigMapName string) error
 }
@@ -23,7 +23,6 @@ import (
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
     "go.uber.org/goleak"
-    corev1 "k8s.io/api/core/v1"
     kubeadmv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 )
 
@@ -81,16 +80,6 @@ func TestIssueJoinTicket(t *testing.T) {
             ca: stubCA{cert: testCert, nodeName: "node"},
             kubeClient: stubKubeClient{getComponentsVal: components},
             missingComponentsReferenceFile: true,
-        },
-        "worker node components reference missing and fallback fails": {
-            kubeadm: stubTokenGetter{token: testJoinToken},
-            kms: stubKeyGetter{dataKeys: map[string][]byte{
-                uuid: testKey,
-                attestation.MeasurementSecretContext: measurementSecret,
-            }},
-            ca: stubCA{cert: testCert, nodeName: "node"},
-            kubeClient: stubKubeClient{createConfigMapErr: someErr},
-            missingComponentsReferenceFile: true,
             wantErr: true,
         },
         "kubeclient fails": {
@@ -326,9 +315,6 @@ type stubKubeClient struct {
     getComponentsVal versions.ComponentVersions
     getComponentsErr error
 
-    createConfigMapErr error
-
-    addReferenceToK8sVersionConfigMapErr error
     addNodeToJoiningNodesErr error
     joiningNodeName string
     componentsHash string
@@ -338,14 +324,6 @@ func (s *stubKubeClient) GetComponents(ctx context.Context, configMapName string
     return s.getComponentsVal, s.getComponentsErr
 }
 
-func (s *stubKubeClient) CreateConfigMap(ctx context.Context, configMap corev1.ConfigMap) error {
-    return s.createConfigMapErr
-}
-
-func (s *stubKubeClient) AddReferenceToK8sVersionConfigMap(ctx context.Context, k8sVersionsConfigMapName string, componentsConfigMapName string) error {
-    return s.addReferenceToK8sVersionConfigMapErr
-}
-
 func (s *stubKubeClient) AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string, isControlPlane bool) error {
     s.joiningNodeName = nodeName
     s.componentsHash = componentsHash