Mirror of https://github.com/edgelesssys/constellation.git (synced 2025-01-11 23:49:30 -05:00)
join: deprecate components migration fallback (#833)
Commit 0297aed1ea (parent 16b002ec1d)
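As the title and the hunks below indicate, this commit drops the join service's migration fallback for clusters that lack a reference to a Kubernetes components ConfigMap: the bootstrapper's version-based InstallComponents is removed, InstallComponentsFromCLI takes over its name, JoinCluster always installs the component list handed to it, and the join service no longer creates a fallback components ConfigMap (and so sheds the related kubeClient methods, RBAC verbs, and tests).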
@@ -75,8 +75,8 @@ func NewKubernetesUtil() *KubernetesUtil {
 	}
 }
 
-// InstallComponentsFromCLI installs the kubernetes components passed from the CLI.
-func (k *KubernetesUtil) InstallComponentsFromCLI(ctx context.Context, kubernetesComponents versions.ComponentVersions) error {
+// InstallComponents installs the kubernetes components passed from the CLI.
+func (k *KubernetesUtil) InstallComponents(ctx context.Context, kubernetesComponents versions.ComponentVersions) error {
 	for _, component := range kubernetesComponents {
 		if err := k.inst.Install(ctx, component); err != nil {
 			return fmt.Errorf("installing kubernetes component from URL %s: %w", component.URL, err)
@@ -86,20 +86,6 @@ func (k *KubernetesUtil) InstallComponentsFromCLI(ctx context.Context, kubernete
 	return enableSystemdUnit(ctx, kubeletServicePath)
 }
 
-// InstallComponents installs kubernetes components in the version specified.
-// TODO(AB#2543,3u13r): Remove this function once the JoinService is extended.
-func (k *KubernetesUtil) InstallComponents(ctx context.Context, version versions.ValidK8sVersion) error {
-	versionConf := versions.VersionConfigs[version]
-
-	for _, component := range versionConf.KubernetesComponents {
-		if err := k.inst.Install(ctx, component); err != nil {
-			return fmt.Errorf("installing kubernetes component from URL %s: %w", component.URL, err)
-		}
-	}
-
-	return enableSystemdUnit(ctx, kubeletServicePath)
-}
-
 // InitCluster instruments kubeadm to initialize the K8s cluster.
 func (k *KubernetesUtil) InitCluster(
 	ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, conformanceMode bool, log *logger.Logger,
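A minimal, standalone sketch of the pattern these two hunks converge on: a single install path that is always handed the concrete component list and never resolves it from a built-in version table. The types, the example URL, the install path, and the install callback are placeholders for illustration only, not the project's real installer API.

package main

import (
	"context"
	"fmt"
)

// componentVersion is a stand-in for one entry of the project's component list;
// the real type carries additional fields.
type componentVersion struct {
	URL         string
	InstallPath string
}

// installComponents always receives the concrete components to install; nothing
// is looked up from a Kubernetes-version table anymore.
func installComponents(ctx context.Context, components []componentVersion, install func(context.Context, componentVersion) error) error {
	for _, component := range components {
		if err := install(ctx, component); err != nil {
			return fmt.Errorf("installing kubernetes component from URL %s: %w", component.URL, err)
		}
	}
	return nil
}

func main() {
	// fakeInstall only prints what a real installer would download.
	fakeInstall := func(_ context.Context, c componentVersion) error {
		fmt.Printf("would download %s to %s\n", c.URL, c.InstallPath)
		return nil
	}
	if err := installComponents(context.Background(), []componentVersion{
		{URL: "https://example.com/kubeadm", InstallPath: "/usr/bin/kubeadm"},
	}, fakeInstall); err != nil {
		fmt.Println(err)
	}
}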
@@ -18,8 +18,7 @@ import (
 )
 
 type clusterUtil interface {
-	InstallComponents(ctx context.Context, version versions.ValidK8sVersion) error
-	InstallComponentsFromCLI(ctx context.Context, kubernetesComponents versions.ComponentVersions) error
+	InstallComponents(ctx context.Context, kubernetesComponents versions.ComponentVersions) error
 	InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, conformanceMode bool, log *logger.Logger) error
 	JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, log *logger.Logger) error
 	FixCilium(log *logger.Logger)
@@ -97,7 +97,7 @@ func (k *KubeWrapper) InitCluster(
 		return nil, err
 	}
 	log.With(zap.String("version", string(k8sVersion))).Infof("Installing Kubernetes components")
-	if err := k.clusterUtil.InstallComponentsFromCLI(ctx, kubernetesComponents); err != nil {
+	if err := k.clusterUtil.InstallComponents(ctx, kubernetesComponents); err != nil {
 		return nil, err
 	}
 
@@ -260,22 +260,10 @@ func (k *KubeWrapper) InitCluster(
 
 // JoinCluster joins existing Kubernetes cluster.
 func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, peerRole role.Role, versionString string, k8sComponents versions.ComponentVersions, log *logger.Logger) error {
-	k8sVersion, err := versions.NewValidK8sVersion(versionString)
-	if err != nil {
-		return err
-	}
-
-	if len(k8sComponents) != 0 {
-		log.With("k8sComponents", k8sComponents).Infof("Using provided kubernetes components")
-		if err := k.clusterUtil.InstallComponentsFromCLI(ctx, k8sComponents); err != nil {
-			return fmt.Errorf("installing kubernetes components: %w", err)
-		}
-	} else {
-		log.With(zap.String("version", string(k8sVersion))).Infof("Installing Kubernetes components")
-		if err := k.clusterUtil.InstallComponents(ctx, k8sVersion); err != nil {
-			return fmt.Errorf("installing kubernetes components: %w", err)
-		}
-	}
+	log.With("k8sComponents", k8sComponents).Infof("Installing provided kubernetes components")
+	if err := k.clusterUtil.InstallComponents(ctx, k8sComponents); err != nil {
+		return fmt.Errorf("installing kubernetes components: %w", err)
+	}
 
 	// Step 1: retrieve cloud metadata for Kubernetes configuration
 	log.Infof("Retrieving node metadata")
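Taken together with the previous hunk, JoinCluster now unconditionally installs whatever component list the join service hands over; the branch that fell back to installing components for the node's Kubernetes version when that list was empty is gone, which is what the test and stub changes below reflect.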
@@ -299,7 +299,6 @@ func TestJoinCluster(t *testing.T) {
 		wantConfig kubeadm.JoinConfiguration
 		role role.Role
 		k8sComponents versions.ComponentVersions
-		wantComponentsFromCLI bool
 		wantErr bool
 	}{
 		"kubeadm join worker works with metadata and remote Kubernetes Components": {
@@ -313,7 +312,6 @@ func TestJoinCluster(t *testing.T) {
 			},
 			k8sComponents: k8sComponents,
 			role: role.Worker,
-			wantComponentsFromCLI: true,
 			wantConfig: kubeadm.JoinConfiguration{
 				Discovery: kubeadm.Discovery{
 					BootstrapToken: joinCommand,
@@ -392,7 +390,7 @@ func TestJoinCluster(t *testing.T) {
 			},
 		},
 		"kubeadm join worker fails when installing remote Kubernetes components": {
-			clusterUtil: stubClusterUtil{installComponentsFromCLIErr: errors.New("error")},
+			clusterUtil: stubClusterUtil{installComponentsErr: errors.New("error")},
 			providerMetadata: &stubProviderMetadata{
 				selfResp: metadata.InstanceMetadata{
 					ProviderID: "provider-id",
@@ -402,20 +400,6 @@ func TestJoinCluster(t *testing.T) {
 			},
 			k8sComponents: k8sComponents,
 			role: role.Worker,
-			wantComponentsFromCLI: true,
 			wantErr: true,
 		},
-		"kubeadm join worker fails when installing local Kubernetes components": {
-			clusterUtil: stubClusterUtil{installComponentsErr: errors.New("error")},
-			providerMetadata: &stubProviderMetadata{
-				selfResp: metadata.InstanceMetadata{
-					ProviderID: "provider-id",
-					Name: "metadata-name",
-					VPCIP: "192.0.2.1",
-				},
-			},
-			role: role.Worker,
-			wantComponentsFromCLI: true,
-			wantErr: true,
-		},
 		"kubeadm join worker fails when retrieving self metadata": {
@@ -457,8 +441,6 @@ func TestJoinCluster(t *testing.T) {
 			require.NoError(kubernetes.UnmarshalK8SResources(tc.clusterUtil.joinConfigs[0], &joinYaml))
 
 			assert.Equal(tc.wantConfig, joinYaml.JoinConfiguration)
-			assert.Equal(tc.wantComponentsFromCLI, tc.clusterUtil.calledInstallComponentsFromCLI)
-			assert.Equal(!tc.wantComponentsFromCLI, tc.clusterUtil.calledInstallComponents)
 		})
 	}
 }
@@ -510,7 +492,6 @@ func TestK8sCompliantHostname(t *testing.T) {
 
 type stubClusterUtil struct {
 	installComponentsErr error
-	installComponentsFromCLIErr error
 	initClusterErr error
 	setupAutoscalingError error
 	setupKonnectivityError error
@@ -521,9 +502,6 @@ type stubClusterUtil struct {
 	joinClusterErr error
 	startKubeletErr error
 
-	calledInstallComponents bool
-	calledInstallComponentsFromCLI bool
-
 	initConfigs [][]byte
 	joinConfigs [][]byte
 }
@@ -532,16 +510,10 @@ func (s *stubClusterUtil) SetupKonnectivity(kubectl k8sapi.Client, konnectivityA
 	return s.setupKonnectivityError
 }
 
-func (s *stubClusterUtil) InstallComponents(ctx context.Context, version versions.ValidK8sVersion) error {
-	s.calledInstallComponents = true
+func (s *stubClusterUtil) InstallComponents(ctx context.Context, kubernetesComponents versions.ComponentVersions) error {
 	return s.installComponentsErr
 }
 
-func (s *stubClusterUtil) InstallComponentsFromCLI(ctx context.Context, kubernetesComponents versions.ComponentVersions) error {
-	s.calledInstallComponentsFromCLI = true
-	return s.installComponentsFromCLIErr
-}
-
 func (s *stubClusterUtil) InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, conformanceMode bool, log *logger.Logger) error {
 	s.initConfigs = append(s.initConfigs, initConfig)
 	return s.initClusterErr
@@ -28,8 +28,6 @@ rules:
       - configmaps
     verbs:
       - get
-      - create
-      - update
   - apiGroups:
       - "update.edgeless.systems"
     resources:

@@ -28,8 +28,6 @@ rules:
      - configmaps
    verbs:
      - get
-      - create
-      - update
  - apiGroups:
      - "update.edgeless.systems"
    resources:

@@ -28,8 +28,6 @@ rules:
      - configmaps
    verbs:
      - get
-      - create
-      - update
  - apiGroups:
      - "update.edgeless.systems"
    resources:

@@ -28,8 +28,6 @@ rules:
      - configmaps
    verbs:
      - get
-      - create
-      - update
  - apiGroups:
      - "update.edgeless.systems"
    resources:
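The four chart hunks above appear to drop the create and update verbs on configmaps from the join service's RBAC rules, which lines up with the client changes below: CreateConfigMap and AddReferenceToK8sVersionConfigMap go away, so read access to ConfigMaps is all the service still needs.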
@@ -16,7 +16,6 @@ import (
 
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/versions"
-	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -74,15 +73,6 @@ func (c *Client) getConfigMapData(ctx context.Context, name, key string) (string
 	return cm.Data[key], nil
 }
 
-// CreateConfigMap creates the provided configmap.
-func (c *Client) CreateConfigMap(ctx context.Context, configMap corev1.ConfigMap) error {
-	_, err := c.client.CoreV1().ConfigMaps(configMap.ObjectMeta.Namespace).Create(ctx, &configMap, metav1.CreateOptions{})
-	if err != nil {
-		return fmt.Errorf("failed to create configmap: %w", err)
-	}
-	return nil
-}
-
 // AddNodeToJoiningNodes adds the provided node as a joining node CRD.
 func (c *Client) AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string, isControlPlane bool) error {
 	joiningNode := &unstructured.Unstructured{}
@@ -138,20 +128,6 @@ func (c *Client) addWorkerToJoiningNodes(ctx context.Context, joiningNode *unstr
 	return nil
 }
 
-// AddReferenceToK8sVersionConfigMap adds a reference to the provided configmap to the k8s version configmap.
-func (c *Client) AddReferenceToK8sVersionConfigMap(ctx context.Context, k8sVersionsConfigMapName string, componentsConfigMapName string) error {
-	cm, err := c.client.CoreV1().ConfigMaps("kube-system").Get(ctx, k8sVersionsConfigMapName, metav1.GetOptions{})
-	if err != nil {
-		return fmt.Errorf("failed to get configmap: %w", err)
-	}
-	cm.Data[constants.K8sComponentsFieldName] = componentsConfigMapName
-	_, err = c.client.CoreV1().ConfigMaps("kube-system").Update(ctx, cm, metav1.UpdateOptions{})
-	if err != nil {
-		return fmt.Errorf("failed to update configmap: %w", err)
-	}
-	return nil
-}
-
 var validHostnameRegex = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
 
 // k8sCompliantHostname transforms a hostname to an RFC 1123 compliant, lowercase subdomain as required by Kubernetes node names.
@@ -8,16 +8,11 @@ package server
 
 import (
 	"context"
-	"encoding/json"
-	"errors"
 	"fmt"
-	"io/fs"
 	"net"
 	"path/filepath"
-	"strings"
 	"time"
 
-	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
 	"github.com/edgelesssys/constellation/v2/internal/attestation"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/crypto"
@@ -32,8 +27,6 @@ import (
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/status"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	kubeadmv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 )
@@ -129,18 +122,7 @@ func (s *Server) IssueJoinTicket(ctx context.Context, req *joinproto.IssueJoinTi
 
 	log.Infof("Querying K8sVersion ConfigMap for components ConfigMap name")
 	componentsConfigMapName, err := s.getK8sComponentsConfigMapName()
-	if errors.Is(err, fs.ErrNotExist) {
-		// If the file does not exist, the Constellation was initialized with a version before 2.3.0
-		// As a migration step, the join service will create the ConfigMap with the K8s components which
-		// match the K8s minor version of the cluster.
-		log.Warnf("Reference to K8sVersion ConfigMap does not exist, creating fallback Components ConfigMap and referencing it in K8sVersion ConfigMap")
-		log.Warnf("This is expected if the Constellation was initialized with a CLI before version 2.3.0")
-		log.Warnf("DEPRECATION WARNING: This is a migration step and will be removed in a future release")
-		componentsConfigMapName, err = s.createFallbackComponentsConfigMap(ctx, k8sVersion)
-		if err != nil {
-			return nil, status.Errorf(codes.Internal, "unable to create fallback k8s components configmap: %s", err)
-		}
-	} else if err != nil {
+	if err != nil {
 		return nil, status.Errorf(codes.Internal, "unable to get components ConfigMap name: %s", err)
 	}
 
@@ -244,49 +226,6 @@ func (s *Server) getK8sComponentsConfigMapName() (string, error) {
 	return componentsConfigMapName, nil
 }
 
-// This function mimics the creation of the components ConfigMap which is now done in the bootstrapper
-// during the first initialization of the Constellation.
-// For more information see setupK8sVersionConfigMap() in bootstrapper/internal/kubernetes/kubernetes.go.
-// This is a migration step and will be removed in a future release.
-func (s *Server) createFallbackComponentsConfigMap(ctx context.Context, k8sVersion string) (string, error) {
-	validK8sVersion, err := versions.NewValidK8sVersion(k8sVersion)
-	if err != nil {
-		return "", fmt.Errorf("could not create fallback components config map: %w", err)
-	}
-	components := versions.VersionConfigs[validK8sVersion].KubernetesComponents
-	componentsMarshalled, err := json.Marshal(components)
-	if err != nil {
-		return "", fmt.Errorf("marshalling component versions: %w", err)
-	}
-	componentsHash := components.GetHash()
-	componentConfigMapName := fmt.Sprintf("k8s-component-%s", strings.ReplaceAll(componentsHash, ":", "-"))
-
-	componentsConfig := corev1.ConfigMap{
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: "v1",
-			Kind: "ConfigMap",
-		},
-		Immutable: to.Ptr(true),
-		ObjectMeta: metav1.ObjectMeta{
-			Name: componentConfigMapName,
-			Namespace: "kube-system",
-		},
-		Data: map[string]string{
-			constants.K8sComponentsFieldName: string(componentsMarshalled),
-		},
-	}
-
-	if err := s.kubeClient.CreateConfigMap(ctx, componentsConfig); err != nil {
-		return "", fmt.Errorf("creating fallback components config map: %w", err)
-	}
-
-	if err := s.kubeClient.AddReferenceToK8sVersionConfigMap(ctx, "k8s-version", componentConfigMapName); err != nil {
-		return "", fmt.Errorf("adding reference to fallback components config map: %w", err)
-	}
-
-	return componentConfigMapName, nil
-}
-
 // joinTokenGetter returns Kubernetes bootstrap (join) tokens.
 type joinTokenGetter interface {
 	// GetJoinToken returns a bootstrap (join) token.
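Per the comments removed above, this fallback only existed to migrate clusters that were initialized with a CLI older than 2.3.0. With it gone, such clusters presumably need to pass through a join service release that still performed the migration, so that the k8s-version ConfigMap already references a components ConfigMap, before upgrading to a version containing this change.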
@@ -309,7 +248,5 @@ type certificateAuthority interface {
 
 type kubeClient interface {
 	GetComponents(ctx context.Context, configMapName string) (versions.ComponentVersions, error)
-	CreateConfigMap(ctx context.Context, configMap corev1.ConfigMap) error
 	AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string, isControlPlane bool) error
-	AddReferenceToK8sVersionConfigMap(ctx context.Context, k8sVersionsConfigMapName string, componentsConfigMapName string) error
 }
@@ -23,7 +23,6 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
-	corev1 "k8s.io/api/core/v1"
 	kubeadmv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 )
 
@@ -81,16 +80,6 @@ func TestIssueJoinTicket(t *testing.T) {
 			ca: stubCA{cert: testCert, nodeName: "node"},
 			kubeClient: stubKubeClient{getComponentsVal: components},
 			missingComponentsReferenceFile: true,
-		},
-		"worker node components reference missing and fallback fails": {
-			kubeadm: stubTokenGetter{token: testJoinToken},
-			kms: stubKeyGetter{dataKeys: map[string][]byte{
-				uuid: testKey,
-				attestation.MeasurementSecretContext: measurementSecret,
-			}},
-			ca: stubCA{cert: testCert, nodeName: "node"},
-			kubeClient: stubKubeClient{createConfigMapErr: someErr},
-			missingComponentsReferenceFile: true,
 			wantErr: true,
 		},
 		"kubeclient fails": {
@@ -326,9 +315,6 @@ type stubKubeClient struct {
 	getComponentsVal versions.ComponentVersions
 	getComponentsErr error
 
-	createConfigMapErr error
-
-	addReferenceToK8sVersionConfigMapErr error
 	addNodeToJoiningNodesErr error
 	joiningNodeName string
 	componentsHash string
@@ -338,14 +324,6 @@ func (s *stubKubeClient) GetComponents(ctx context.Context, configMapName string
 	return s.getComponentsVal, s.getComponentsErr
 }
 
-func (s *stubKubeClient) CreateConfigMap(ctx context.Context, configMap corev1.ConfigMap) error {
-	return s.createConfigMapErr
-}
-
-func (s *stubKubeClient) AddReferenceToK8sVersionConfigMap(ctx context.Context, k8sVersionsConfigMapName string, componentsConfigMapName string) error {
-	return s.addReferenceToK8sVersionConfigMapErr
-}
-
 func (s *stubKubeClient) AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string, isControlPlane bool) error {
 	s.joiningNodeName = nodeName
 	s.componentsHash = componentsHash