Mirror of https://github.com/edgelesssys/constellation.git
Fix Konnectivity migration (#2633)
* helm: let cilium upgrade jump minor versions
* cli: reconcile kubeadm cm to not have konnectivity
parent 949186e5d7
commit 635a5d2c0a
@@ -115,7 +115,11 @@ func (a actionFactory) appendNewAction(release Release, configTargetVersion semv
     } else {
         // This may break for external chart dependencies if we decide to upgrade more than one minor version at a time.
         if err := newVersion.IsUpgradeTo(currentVersion); err != nil {
-            return fmt.Errorf("invalid upgrade for %s: %w", release.ReleaseName, err)
+            // TODO(3u13r): Remove when Constellation v2.14 is released.
+            // We need to ignore that we jump from Cilium v1.12 to v1.15-pre. We have verified that this works.
+            if !(errors.Is(err, compatibility.ErrMinorDrift) && release.ReleaseName == "cilium") {
+                return fmt.Errorf("invalid upgrade for %s: %w", release.ReleaseName, err)
+            }
         }
     }
 }
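An aside on the hunk above: the carve-out works because the version check wraps a sentinel error that errors.Is can match, so exactly one vetted minor-version jump (Cilium v1.12 to v1.15-pre) passes while every other drift still fails. Below is a minimal, self-contained sketch of that gate pattern; ErrMinorDrift and isUpgradeTo here are local stand-ins, not Constellation's actual compatibility package.

package main

import (
    "errors"
    "fmt"
)

// ErrMinorDrift is a stand-in for compatibility.ErrMinorDrift: a sentinel
// returned when an upgrade would skip more than one minor version.
var ErrMinorDrift = errors.New("only upgrades of at most one minor version are allowed")

// isUpgradeTo is a hypothetical stand-in for the semver upgrade check. It
// wraps ErrMinorDrift so callers can match it with errors.Is.
func isUpgradeTo(currentMinor, newMinor int) error {
    if newMinor-currentMinor > 1 {
        return fmt.Errorf("minor version drift from %d to %d: %w", currentMinor, newMinor, ErrMinorDrift)
    }
    return nil
}

func main() {
    releases := []struct {
        name         string
        currentMinor int
        newMinor     int
    }{
        {"cilium", 12, 15},                 // allowed: the one vetted exception
        {"cert-manager", 12, 15},           // rejected: minor drift, not exempt
        {"constellation-services", 13, 14}, // allowed: normal +1 minor upgrade
    }
    for _, r := range releases {
        err := isUpgradeTo(r.currentMinor, r.newMinor)
        // Same shape as the gate in the hunk: only a known sentinel on a
        // known release is waved through.
        if err != nil && !(errors.Is(err, ErrMinorDrift) && r.name == "cilium") {
            fmt.Printf("%s: invalid upgrade: %v\n", r.name, err)
            continue
        }
        fmt.Printf("%s: upgrade accepted\n", r.name)
    }
}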
@@ -198,7 +198,7 @@ func TestHelmApply(t *testing.T) {
     if tc.clusterCertManagerVersion != nil {
         certManagerVersion = *tc.clusterCertManagerVersion
     }
-    helmListVersion(lister, "cilium", "v1.12.1")
+    helmListVersion(lister, "cilium", "v1.15.0-pre.2")
     helmListVersion(lister, "cert-manager", certManagerVersion)
     helmListVersion(lister, "constellation-services", tc.clusterMicroServiceVersion)
     helmListVersion(lister, "constellation-operators", tc.clusterMicroServiceVersion)
@@ -69,6 +69,7 @@ go_test(
         "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured",
         "@io_k8s_apimachinery//pkg/runtime",
         "@io_k8s_apimachinery//pkg/runtime/schema",
+        "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta3",
         "@io_k8s_sigs_yaml//:yaml",
     ],
 )
@@ -22,6 +22,7 @@ import (
     "errors"
     "fmt"
     "io"
+    "slices"
     "sort"
     "strings"
     "time"
@@ -128,6 +129,12 @@ func (k *KubeCmd) UpgradeNodeVersion(ctx context.Context, conf *config.Config, f
     case err != nil:
         return fmt.Errorf("updating image version: %w", err)
     }
+
+    // TODO(3u13r): remove `reconcileKubeadmConfigMap` after v2.14.0 has been released.
+    if err := k.reconcileKubeadmConfigMap(ctx); err != nil {
+        return fmt.Errorf("reconciling kubeadm config: %w", err)
+    }
+
     k.log.Debugf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, imageVersion.Version())
     nodeVersion.Spec.ImageReference = imageReference
     nodeVersion.Spec.ImageVersion = imageVersion.Version()
@@ -393,6 +400,44 @@ func (k *KubeCmd) applyNodeVersion(ctx context.Context, nodeVersion updatev1alph
     return updatedNodeVersion, err
 }
 
+func (k *KubeCmd) reconcileKubeadmConfigMap(ctx context.Context) error {
+    clusterConfiguration, kubeadmConfig, err := k.getClusterConfiguration(ctx)
+    if err != nil {
+        return fmt.Errorf("getting ClusterConfig: %w", err)
+    }
+
+    for i, v := range clusterConfiguration.APIServer.ExtraVolumes {
+        if v.Name == "konnectivity-uds" {
+            clusterConfiguration.APIServer.ExtraVolumes = slices.Delete(clusterConfiguration.APIServer.ExtraVolumes, i, i+1)
+        }
+    }
+    for i, v := range clusterConfiguration.APIServer.ExtraVolumes {
+        if v.Name == "egress-config" {
+            clusterConfiguration.APIServer.ExtraVolumes = slices.Delete(clusterConfiguration.APIServer.ExtraVolumes, i, i+1)
+        }
+    }
+    delete(clusterConfiguration.APIServer.ExtraArgs, "egress-selector-config-file")
+
+    newConfigYAML, err := yaml.Marshal(clusterConfiguration)
+    if err != nil {
+        return fmt.Errorf("marshaling ClusterConfiguration: %w", err)
+    }
+
+    if kubeadmConfig.Data[constants.ClusterConfigurationKey] == string(newConfigYAML) {
+        k.log.Debugf("No changes to kubeadm config required")
+        return nil
+    }
+
+    kubeadmConfig.Data[constants.ClusterConfigurationKey] = string(newConfigYAML)
+    k.log.Debugf("Triggering kubeadm config update now")
+    if _, err = k.kubectl.UpdateConfigMap(ctx, kubeadmConfig); err != nil {
+        return fmt.Errorf("setting new kubeadm config: %w", err)
+    }
+
+    fmt.Fprintln(k.outWriter, "Successfully reconciled the cluster's kubeadm config")
+    return nil
+}
+
 // isValidImageUpgrade checks if the new image version is a valid upgrade, and there is no upgrade already running.
 func (k *KubeCmd) isValidImageUpgrade(nodeVersion updatev1alpha1.NodeVersion, newImageVersion string, force bool) error {
     if !force {
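A note on the removal loops above: calling slices.Delete while ranging over the slice being shortened shifts indices under the iteration and can skip adjacent matches, which is presumably why each volume name gets its own pass here. Since Go 1.21, slices.DeleteFunc removes every match in a single pass. The following is only an editorial sketch of that alternative, with a local hostPathMount stand-in for kubeadm's HostPathMount type, not the committed code.

package main

import (
    "fmt"
    "slices"
)

// hostPathMount is a local stand-in for kubeadm's v1beta3.HostPathMount.
type hostPathMount struct {
    Name     string
    HostPath string
}

func main() {
    extraVolumes := []hostPathMount{
        {Name: "egress-config", HostPath: "/etc/kubernetes/egress-selector-config-file.yaml"},
        {Name: "konnectivity-uds", HostPath: "/some/path/to/konnectivity-uds"},
        {Name: "audit-log", HostPath: "/var/log/kubernetes/audit"},
    }

    // DeleteFunc drops every element the predicate matches, in one pass,
    // with no index bookkeeping and no risk of skipping adjacent matches.
    extraVolumes = slices.DeleteFunc(extraVolumes, func(v hostPathMount) bool {
        return v.Name == "konnectivity-uds" || v.Name == "egress-config"
    })

    fmt.Println(extraVolumes) // [{audit-log /var/log/kubernetes/audit}]
}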
@@ -11,6 +11,7 @@ import (
     "encoding/json"
     "errors"
     "io"
+    "strings"
     "testing"
     "time"
 
@@ -34,9 +35,61 @@ import (
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"
+    kubeadmv1beta3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 )
 
 func TestUpgradeNodeVersion(t *testing.T) {
+    clusterConf := kubeadmv1beta3.ClusterConfiguration{
+        APIServer: kubeadmv1beta3.APIServer{
+            ControlPlaneComponent: kubeadmv1beta3.ControlPlaneComponent{
+                ExtraArgs:    map[string]string{},
+                ExtraVolumes: []kubeadmv1beta3.HostPathMount{},
+            },
+        },
+    }
+
+    clusterConfBytes, err := json.Marshal(clusterConf)
+    require.NoError(t, err)
+    validKubeadmConfig := &corev1.ConfigMap{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: constants.KubeadmConfigMap,
+        },
+        Data: map[string]string{
+            constants.ClusterConfigurationKey: string(clusterConfBytes),
+        },
+    }
+
+    clusterConfWithKonnectivity := kubeadmv1beta3.ClusterConfiguration{
+        APIServer: kubeadmv1beta3.APIServer{
+            ControlPlaneComponent: kubeadmv1beta3.ControlPlaneComponent{
+                ExtraArgs: map[string]string{
+                    "egress-selector-config-file": "/etc/kubernetes/egress-selector-config-file.yaml",
+                },
+                ExtraVolumes: []kubeadmv1beta3.HostPathMount{
+                    {
+                        Name:     "egress-config",
+                        HostPath: "/etc/kubernetes/egress-selector-config-file.yaml",
+                    },
+                    {
+                        Name:     "konnectivity-uds",
+                        HostPath: "/some/path/to/konnectivity-uds",
+                    },
+                },
+            },
+        },
+    }
+
+    clusterConfBytesWithKonnectivity, err := json.Marshal(clusterConfWithKonnectivity)
+    require.NoError(t, err)
+    validKubeadmConfigWithKonnectivity := &corev1.ConfigMap{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: constants.KubeadmConfigMap,
+        },
+        Data: map[string]string{
+            constants.ClusterConfigurationKey: string(clusterConfBytesWithKonnectivity),
+        },
+    }
+
     testCases := map[string]struct {
         kubectl    *stubKubectl
         conditions []metav1.Condition
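One detail worth flagging in the fixtures above: the tests serialize ClusterConfiguration with json.Marshal, while reconcileKubeadmConfigMap writes back via sigs.k8s.io/yaml. Parsing still works because JSON is valid YAML, but the re-marshaled YAML text is never byte-identical to a JSON fixture, so the reconcile step's string comparison reports a change even for an already-clean config. A small sketch of the mismatch, assuming sigs.k8s.io/yaml is on the module path (it is already among the test's deps); the apiServer type here is a simplified stand-in:

package main

import (
    "encoding/json"
    "fmt"

    "sigs.k8s.io/yaml"
)

// apiServer is a simplified stand-in for kubeadm's APIServer config.
type apiServer struct {
    ExtraArgs map[string]string `json:"extraArgs,omitempty"`
}

func main() {
    conf := apiServer{ExtraArgs: map[string]string{"audit-log-path": "/var/log/audit.log"}}

    asJSON, _ := json.Marshal(conf) // what the test fixture stores
    asYAML, _ := yaml.Marshal(conf) // what the reconcile step writes back

    fmt.Println(string(asJSON)) // {"extraArgs":{"audit-log-path":"/var/log/audit.log"}}
    fmt.Print(string(asYAML))   // extraArgs:\n  audit-log-path: /var/log/audit.log
    // The byte-wise comparison is therefore always false for JSON fixtures:
    fmt.Println(string(asJSON) == string(asYAML)) // false
}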
@@ -63,7 +116,25 @@ func TestUpgradeNodeVersion(t *testing.T) {
             currentClusterVersion: supportedValidK8sVersions()[0],
             kubectl: &stubKubectl{
                 configMaps: map[string]*corev1.ConfigMap{
-                    constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.JoinConfigMap:    newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.KubeadmConfigMap: validKubeadmConfig,
                 },
             },
             wantUpdate: true,
+        },
+        "success with konnectivity migration": {
+            conf: func() *config.Config {
+                conf := config.Default()
+                conf.Image = "v1.2.3"
+                conf.KubernetesVersion = supportedValidK8sVersions()[1]
+                return conf
+            }(),
+            currentImageVersion:   "v1.2.2",
+            currentClusterVersion: supportedValidK8sVersions()[0],
+            kubectl: &stubKubectl{
+                configMaps: map[string]*corev1.ConfigMap{
+                    constants.JoinConfigMap:    newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.KubeadmConfigMap: validKubeadmConfigWithKonnectivity,
+                },
+            },
+            wantUpdate: true,
@@ -79,7 +150,8 @@ func TestUpgradeNodeVersion(t *testing.T) {
             currentClusterVersion: supportedValidK8sVersions()[0],
             kubectl: &stubKubectl{
                 configMaps: map[string]*corev1.ConfigMap{
-                    constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.JoinConfigMap:    newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.KubeadmConfigMap: validKubeadmConfig,
                 },
             },
             wantUpdate: true,
@@ -100,7 +172,8 @@ func TestUpgradeNodeVersion(t *testing.T) {
             currentClusterVersion: supportedValidK8sVersions()[0],
             kubectl: &stubKubectl{
                 configMaps: map[string]*corev1.ConfigMap{
-                    constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.JoinConfigMap:    newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.KubeadmConfigMap: validKubeadmConfig,
                 },
             },
             wantUpdate: true,
@@ -119,8 +192,12 @@ func TestUpgradeNodeVersion(t *testing.T) {
             }(),
             currentImageVersion:   "v1.2.2",
             currentClusterVersion: supportedValidK8sVersions()[0],
-            kubectl:               &stubKubectl{},
-            wantErr:               true,
+            kubectl: &stubKubectl{
+                configMaps: map[string]*corev1.ConfigMap{
+                    constants.KubeadmConfigMap: validKubeadmConfig,
+                },
+            },
+            wantErr: true,
             assertCorrectError: func(t *testing.T, err error) bool {
                 var upgradeErr *compatibility.InvalidUpgradeError
                 return assert.ErrorAs(t, err, &upgradeErr)
@@ -139,8 +216,12 @@ func TestUpgradeNodeVersion(t *testing.T) {
             }},
             currentImageVersion:   "v1.2.2",
             currentClusterVersion: supportedValidK8sVersions()[0],
-            kubectl:               &stubKubectl{},
-            wantErr:               true,
+            kubectl: &stubKubectl{
+                configMaps: map[string]*corev1.ConfigMap{
+                    constants.KubeadmConfigMap: validKubeadmConfig,
+                },
+            },
+            wantErr: true,
             assertCorrectError: func(t *testing.T, err error) bool {
                 return assert.ErrorIs(t, err, ErrInProgress)
             },
@@ -158,9 +239,13 @@ func TestUpgradeNodeVersion(t *testing.T) {
             }},
             currentImageVersion:   "v1.2.2",
             currentClusterVersion: supportedValidK8sVersions()[0],
-            kubectl:               &stubKubectl{},
-            force:                 true,
-            wantUpdate:            true,
+            kubectl: &stubKubectl{
+                configMaps: map[string]*corev1.ConfigMap{
+                    constants.KubeadmConfigMap: validKubeadmConfig,
+                },
+            },
+            force:      true,
+            wantUpdate: true,
         },
         "get error": {
             conf: func() *config.Config {
@@ -173,7 +258,8 @@ func TestUpgradeNodeVersion(t *testing.T) {
             currentClusterVersion: supportedValidK8sVersions()[0],
             kubectl: &stubKubectl{
                 configMaps: map[string]*corev1.ConfigMap{
-                    constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.JoinConfigMap:    newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.KubeadmConfigMap: validKubeadmConfig,
                 },
             },
             getCRErr: assert.AnError,
@@ -194,7 +280,8 @@ func TestUpgradeNodeVersion(t *testing.T) {
             currentClusterVersion: supportedValidK8sVersions()[0],
             kubectl: &stubKubectl{
                 configMaps: map[string]*corev1.ConfigMap{
-                    constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":true}}`),
+                    constants.JoinConfigMap:    newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":true}}`),
+                    constants.KubeadmConfigMap: validKubeadmConfig,
                 },
             },
             wantUpdate: true,
@@ -216,7 +303,8 @@ func TestUpgradeNodeVersion(t *testing.T) {
             currentClusterVersion: supportedValidK8sVersions()[0],
             kubectl: &stubKubectl{
                 configMaps: map[string]*corev1.ConfigMap{
-                    constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.JoinConfigMap:    newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.KubeadmConfigMap: validKubeadmConfig,
                 },
             },
             wantUpdate: true,
@@ -234,7 +322,8 @@ func TestUpgradeNodeVersion(t *testing.T) {
             badImageVersion: "v3.2.1",
             kubectl: &stubKubectl{
                 configMaps: map[string]*corev1.ConfigMap{
-                    constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.JoinConfigMap:    newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.KubeadmConfigMap: validKubeadmConfig,
                 },
             },
             wantUpdate: true,
@@ -255,7 +344,8 @@ func TestUpgradeNodeVersion(t *testing.T) {
             currentClusterVersion: supportedValidK8sVersions()[0],
             kubectl: &stubKubectl{
                 configMaps: map[string]*corev1.ConfigMap{
-                    constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.JoinConfigMap:    newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.KubeadmConfigMap: validKubeadmConfig,
                 },
             },
             wantUpdate: false,
@@ -276,7 +366,8 @@ func TestUpgradeNodeVersion(t *testing.T) {
             currentClusterVersion: supportedValidK8sVersions()[0],
             kubectl: &stubKubectl{
                 configMaps: map[string]*corev1.ConfigMap{
-                    constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.JoinConfigMap:    newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+                    constants.KubeadmConfigMap: validKubeadmConfig,
                 },
             },
             wantUpdate: false, // because customClient is used
@@ -346,6 +437,12 @@ func TestUpgradeNodeVersion(t *testing.T) {
                 return
             }
             assert.NoError(err)
+            // The ConfigMap only exists in the updatedConfigMaps map if it needed to remove the Konnectivity values.
+            if strings.Contains(tc.kubectl.configMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "konnectivity-uds") {
+                assert.NotContains(tc.kubectl.updatedConfigMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "konnectivity-uds")
+                assert.NotContains(tc.kubectl.updatedConfigMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "egress-config")
+                assert.NotContains(tc.kubectl.updatedConfigMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "egress-selector-config-file")
+            }
         })
     }
 }
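The assertions above rely on stubKubectl exposing two maps: configMaps seeds the fake cluster state, and updatedConfigMaps records writes made through UpdateConfigMap, so the test can inspect what the reconciliation wrote without a real API server. The stub's implementation is outside this diff; the following is a hypothetical sketch of that recording pattern, with illustrative names and signatures rather than the repo's actual stub.

package main

import (
    "context"
    "fmt"
)

// configMap is a local stand-in for *corev1.ConfigMap.
type configMap struct {
    Name string
    Data map[string]string
}

// stubKubectl mirrors the pattern used by the tests above: reads come from
// configMaps, and every update is recorded in updatedConfigMaps.
type stubKubectl struct {
    configMaps        map[string]*configMap
    updatedConfigMaps map[string]*configMap
}

func (s *stubKubectl) GetConfigMap(_ context.Context, name string) (*configMap, error) {
    cm, ok := s.configMaps[name]
    if !ok {
        return nil, fmt.Errorf("configmap %q not found", name)
    }
    return cm, nil
}

func (s *stubKubectl) UpdateConfigMap(_ context.Context, cm *configMap) (*configMap, error) {
    if s.updatedConfigMaps == nil {
        s.updatedConfigMaps = map[string]*configMap{}
    }
    s.updatedConfigMaps[cm.Name] = cm // record the write for later assertions
    return cm, nil
}

func main() {
    stub := &stubKubectl{configMaps: map[string]*configMap{
        "kubeadm-config": {Name: "kubeadm-config", Data: map[string]string{"ClusterConfiguration": "..."}},
    }}
    cm, _ := stub.GetConfigMap(context.Background(), "kubeadm-config")
    cm.Data["ClusterConfiguration"] = "reconciled"
    stub.UpdateConfigMap(context.Background(), cm)
    fmt.Println(stub.updatedConfigMaps["kubeadm-config"].Data["ClusterConfiguration"]) // reconciled
}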