Mirror of https://github.com/edgelesssys/constellation.git, synced 2025-03-02 11:49:33 -05:00
versions: add k8s 1.31, remove k8s 1.28 (#3396)
* constellation-node-operator: upgrade control plane nodes first (#3663)
* versions: add k8s 1.31, remove k8s 1.28
* e2e: set default k8s version for daily to 1.30
* e2e: remove defaults for required arguments
* versions: move 1.31 to the end of the list
* kubernetes: set feature gate ControlPlaneKubeletLocalMode

Co-authored-by: Leonard Cohnen <lc@edgeless.systems>
Co-authored-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>
This commit is contained in:
parent fb609b23b0
commit 473b04abd5
.github/workflows/e2e-test-daily.yml (vendored, 2 changes)

@@ -45,7 +45,7 @@ jobs:
   fail-fast: false
   max-parallel: 5
   matrix:
-    kubernetesVersion: ["1.28"] # should be default
+    kubernetesVersion: ["1.30"] # This should correspond to the current default k8s minor.
     attestationVariant: ["gcp-sev-es", "gcp-sev-snp", "azure-sev-snp", "azure-tdx", "aws-sev-snp"]
     refStream: ["ref/main/stream/debug/?", "ref/release/stream/stable/?"]
     test: ["sonobuoy quick"]
.github/workflows/e2e-test-internal-lb.yml (vendored, 1 change)

@@ -41,7 +41,6 @@ on:
       required: true
     kubernetesVersion:
       description: "Kubernetes version to create the cluster from."
-      default: "1.28"
       required: true
     cliVersion:
       description: "Version of a released CLI to download. Leave empty to build the CLI from the checked out ref."
.github/workflows/e2e-test-release.yml (vendored, 20 changes)

@@ -73,53 +73,53 @@ jobs:
 - test: "sonobuoy full"
   attestationVariant: "gcp-sev-es"
-  kubernetes-version: "v1.29"
+  kubernetes-version: "v1.30"
   runner: "ubuntu-24.04"
   clusterCreation: "cli"
 - test: "sonobuoy full"
   attestationVariant: "gcp-sev-snp"
-  kubernetes-version: "v1.29"
+  kubernetes-version: "v1.30"
   runner: "ubuntu-24.04"
   clusterCreation: "cli"
 - test: "sonobuoy full"
   attestationVariant: "azure-sev-snp"
-  kubernetes-version: "v1.29"
+  kubernetes-version: "v1.30"
   runner: "ubuntu-24.04"
   clusterCreation: "cli"
 - test: "sonobuoy full"
   attestationVariant: "azure-tdx"
-  kubernetes-version: "v1.29"
+  kubernetes-version: "v1.30"
   runner: "ubuntu-24.04"
   clusterCreation: "cli"
 - test: "sonobuoy full"
   attestationVariant: "aws-sev-snp"
-  kubernetes-version: "v1.29"
+  kubernetes-version: "v1.30"
   runner: "ubuntu-24.04"
   clusterCreation: "cli"

 - test: "sonobuoy full"
   attestationVariant: "gcp-sev-es"
-  kubernetes-version: "v1.28"
+  kubernetes-version: "v1.29"
   runner: "ubuntu-24.04"
   clusterCreation: "cli"
 - test: "sonobuoy full"
   attestationVariant: "gcp-sev-snp"
-  kubernetes-version: "v1.28"
+  kubernetes-version: "v1.29"
   runner: "ubuntu-24.04"
   clusterCreation: "cli"
 - test: "sonobuoy full"
   attestationVariant: "azure-sev-snp"
-  kubernetes-version: "v1.28"
+  kubernetes-version: "v1.29"
   runner: "ubuntu-24.04"
   clusterCreation: "cli"
 - test: "sonobuoy full"
   attestationVariant: "azure-tdx"
-  kubernetes-version: "v1.28"
+  kubernetes-version: "v1.29"
   runner: "ubuntu-24.04"
   clusterCreation: "cli"
 - test: "sonobuoy full"
   attestationVariant: "aws-sev-snp"
-  kubernetes-version: "v1.28"
+  kubernetes-version: "v1.29"
   runner: "ubuntu-24.04"
   clusterCreation: "cli"
(another workflow file; its name was not captured in this mirror)

@@ -41,7 +41,6 @@ on:
       required: true
     kubernetesVersion:
       description: "Kubernetes version to create the cluster from."
-      default: "1.28"
       required: true
     releaseVersion:
       description: "Version of a released provider to download. Leave empty to build the provider from the checked out ref."
.github/workflows/e2e-test-weekly.yml (vendored, 30 changes)

@@ -89,53 +89,53 @@ jobs:
 - test: "sonobuoy quick"
   refStream: "ref/main/stream/debug/?"
   attestationVariant: "gcp-sev-es"
-  kubernetes-version: "v1.29"
+  kubernetes-version: "v1.30"
   clusterCreation: "cli"
 - test: "sonobuoy quick"
   refStream: "ref/main/stream/debug/?"
   attestationVariant: "gcp-sev-snp"
-  kubernetes-version: "v1.29"
+  kubernetes-version: "v1.30"
   clusterCreation: "cli"
 - test: "sonobuoy quick"
   refStream: "ref/main/stream/debug/?"
   attestationVariant: "azure-sev-snp"
-  kubernetes-version: "v1.29"
+  kubernetes-version: "v1.30"
   clusterCreation: "cli"
 - test: "sonobuoy quick"
   refStream: "ref/main/stream/debug/?"
   attestationVariant: "azure-tdx"
-  kubernetes-version: "v1.29"
+  kubernetes-version: "v1.30"
   clusterCreation: "cli"
 - test: "sonobuoy quick"
   refStream: "ref/main/stream/debug/?"
   attestationVariant: "aws-sev-snp"
-  kubernetes-version: "v1.29"
+  kubernetes-version: "v1.30"
   clusterCreation: "cli"

 - test: "sonobuoy quick"
   refStream: "ref/main/stream/debug/?"
   attestationVariant: "gcp-sev-es"
-  kubernetes-version: "v1.28"
+  kubernetes-version: "v1.29"
   clusterCreation: "cli"
 - test: "sonobuoy quick"
   refStream: "ref/main/stream/debug/?"
   attestationVariant: "gcp-sev-snp"
-  kubernetes-version: "v1.28"
+  kubernetes-version: "v1.29"
   clusterCreation: "cli"
 - test: "sonobuoy quick"
   refStream: "ref/main/stream/debug/?"
   attestationVariant: "azure-sev-snp"
-  kubernetes-version: "v1.28"
+  kubernetes-version: "v1.29"
   clusterCreation: "cli"
 - test: "sonobuoy quick"
   refStream: "ref/main/stream/debug/?"
   attestationVariant: "azure-tdx"
-  kubernetes-version: "v1.28"
+  kubernetes-version: "v1.29"
   clusterCreation: "cli"
 - test: "sonobuoy quick"
   refStream: "ref/main/stream/debug/?"
   attestationVariant: "aws-sev-snp"
-  kubernetes-version: "v1.28"
+  kubernetes-version: "v1.29"
   clusterCreation: "cli"

@@ -290,27 +290,27 @@ jobs:
 - test: "verify"
   refStream: "ref/release/stream/stable/?"
   attestationVariant: "gcp-sev-es"
-  kubernetes-version: "v1.29"
+  kubernetes-version: "v1.30"
   clusterCreation: "cli"
 - test: "verify"
   refStream: "ref/release/stream/stable/?"
   attestationVariant: "gcp-sev-snp"
-  kubernetes-version: "v1.29"
+  kubernetes-version: "v1.30"
   clusterCreation: "cli"
 - test: "verify"
   refStream: "ref/release/stream/stable/?"
   attestationVariant: "azure-sev-snp"
-  kubernetes-version: "v1.29"
+  kubernetes-version: "v1.30"
   clusterCreation: "cli"
 - test: "verify"
   refStream: "ref/release/stream/stable/?"
   attestationVariant: "azure-tdx"
-  kubernetes-version: "v1.29"
+  kubernetes-version: "v1.30"
   clusterCreation: "cli"
 - test: "verify"
   refStream: "ref/release/stream/stable/?"
   attestationVariant: "aws-sev-snp"
-  kubernetes-version: "v1.29"
+  kubernetes-version: "v1.30"
   clusterCreation: "cli"

 runs-on: ubuntu-24.04
.github/workflows/e2e-test.yml (vendored, 2 changes)

@@ -44,7 +44,7 @@ on:
       required: true
     kubernetesVersion:
       description: "Kubernetes version to create the cluster from."
-      default: "1.29"
+      default: "1.30"
       required: true
     cliVersion:
       description: "Version of a released CLI to download. Leave empty to build the CLI from the checked out ref."
(BUILD.bazel of the bootstrapper's kubeadm config package; file name not captured in this mirror)

@@ -28,6 +28,7 @@ go_library(
         "@io_k8s_kubelet//config/v1beta1",
         "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta3",
        "@io_k8s_kubernetes//cmd/kubeadm/app/constants",
+        "@org_golang_x_mod//semver",
     ],
 )
(bootstrapper kubeadm init configuration, Go source; file name not captured in this mirror)

@@ -12,6 +12,7 @@ import (
 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/certificate"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/kubernetes"
+	"golang.org/x/mod/semver"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kubeletconf "k8s.io/kubelet/config/v1beta1"

@@ -38,7 +39,7 @@ func (c *KubdeadmConfiguration) InitConfiguration(externalCloudProvider bool, cl
 		cloudProvider = "external"
 	}

-	return KubeadmInitYAML{
+	initConfig := KubeadmInitYAML{
 		InitConfiguration: kubeadm.InitConfiguration{
 			TypeMeta: metav1.TypeMeta{
 				APIVersion: kubeadm.SchemeGroupVersion.String(),

@@ -157,6 +158,11 @@ func (c *KubdeadmConfiguration) InitConfiguration(externalCloudProvider bool, cl
 			TLSPrivateKeyFile: certificate.KeyFilename,
 		},
 	}
+
+	if semver.Compare(clusterVersion, "v1.31.0") >= 0 {
+		initConfig.ClusterConfiguration.FeatureGates = map[string]bool{"ControlPlaneKubeletLocalMode": true}
+	}
+	return initConfig
 }

 // JoinConfiguration returns a new kubeadm join configuration.
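The version gate above uses golang.org/x/mod/semver, which compares canonical "vMAJOR.MINOR.PATCH" strings. A minimal standalone sketch of the same check; the helper name featureGatesFor is hypothetical, in the commit the logic lives inline in InitConfiguration:

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// featureGatesFor returns the kubeadm feature gates to enable for a given
// cluster version: ControlPlaneKubeletLocalMode is only set from v1.31 on.
// (Hypothetical helper for illustration.)
func featureGatesFor(clusterVersion string) map[string]bool {
	if semver.Compare(clusterVersion, "v1.31.0") >= 0 {
		return map[string]bool{"ControlPlaneKubeletLocalMode": true}
	}
	return nil
}

func main() {
	fmt.Println(featureGatesFor("v1.30.9")) // map[]
	fmt.Println(featureGatesFor("v1.31.1")) // map[ControlPlaneKubeletLocalMode:true]
}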
(docs: supported Kubernetes versions page; file name not captured in this mirror)

@@ -16,6 +16,6 @@ Subsequent Constellation releases drop support for the oldest (and deprecated) K
 The following Kubernetes versions are currently supported:
 <!--AUTO_GENERATED_BY_BAZEL-->
 <!--DO_NOT_EDIT-->
-* v1.28.15
 * v1.29.13
 * v1.30.9
+* v1.31.1
(docs: CLI reference for "constellation config generate"; file name not captured in this mirror)

@@ -81,7 +81,7 @@ constellation config generate {aws|azure|gcp|openstack|qemu|stackit} [flags]
 ```
   -a, --attestation string   attestation variant to use {aws-sev-snp|aws-nitro-tpm|azure-sev-snp|azure-tdx|azure-trustedlaunch|gcp-sev-snp|gcp-sev-es|qemu-vtpm}. If not specified, the default for the cloud provider is used
   -h, --help                 help for generate
-  -k, --kubernetes string    Kubernetes version to use in format MAJOR.MINOR (default "v1.29")
+  -k, --kubernetes string    Kubernetes version to use in format MAJOR.MINOR (default "v1.30")
   -t, --tags strings         additional tags for created resources given a list of key=value
 ```
(BUILD.bazel of the KubeCmd package; file name not captured in this mirror)

@@ -30,8 +30,11 @@ go_library(
         "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured",
         "@io_k8s_apimachinery//pkg/runtime",
         "@io_k8s_apimachinery//pkg/runtime/schema",
+        "@io_k8s_apimachinery//pkg/runtime/serializer/json",
         "@io_k8s_client_go//util/retry",
-        "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta3",
+        "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm",
+        "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/scheme",
+        "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta4",
         "@io_k8s_sigs_yaml//:yaml",
     ],
 )
(KubeCmd upgrade logic, Go source; file name not captured in this mirror)

@@ -42,9 +42,11 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	k8sjson "k8s.io/apimachinery/pkg/runtime/serializer/json"
 	"k8s.io/client-go/util/retry"
-	kubeadmv1beta3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
-	"sigs.k8s.io/yaml"
+	"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
+	kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
+	kubeadmv1beta4 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta4"
 )

 // ErrInProgress signals that an upgrade is in progress inside the cluster.

@@ -129,6 +131,18 @@ func (k *KubeCmd) UpgradeKubernetesVersion(ctx context.Context, kubernetesVersio
 		)
 	}

+	// TODO(burgerdev): remove after releasing v2.19
+	// Workaround for https://github.com/kubernetes/kubernetes/issues/127316: force kubelet to
+	// connect to the local API server.
+	if err := k.patchKubeadmConfig(ctx, func(cc *kubeadm.ClusterConfiguration) {
+		if cc.FeatureGates == nil {
+			cc.FeatureGates = map[string]bool{}
+		}
+		cc.FeatureGates["ControlPlaneKubeletLocalMode"] = true
+	}); err != nil {
+		return fmt.Errorf("setting FeatureGate ControlPlaneKubeletLocalMode: %w", err)
+	}
+
 	versionConfig, ok := versions.VersionConfigs[kubernetesVersion]
 	if !ok {
 		return fmt.Errorf("skipping Kubernetes upgrade: %w", compatibility.NewInvalidUpgradeError(

@@ -234,48 +248,32 @@ func (k *KubeCmd) ApplyJoinConfig(ctx context.Context, newAttestConfig config.At
 // ExtendClusterConfigCertSANs extends the ClusterConfig stored under "kube-system/kubeadm-config" with the given SANs.
 // Empty strings are ignored, existing SANs are preserved.
 func (k *KubeCmd) ExtendClusterConfigCertSANs(ctx context.Context, alternativeNames []string) error {
-	clusterConfiguration, kubeadmConfig, err := k.getClusterConfiguration(ctx)
-	if err != nil {
-		return fmt.Errorf("getting ClusterConfig: %w", err)
-	}
-
-	existingSANs := make(map[string]struct{})
-	for _, existingSAN := range clusterConfiguration.APIServer.CertSANs {
-		existingSANs[existingSAN] = struct{}{}
-	}
-
-	var missingSANs []string
-	for _, san := range alternativeNames {
-		if san == "" {
-			continue // skip empty SANs
+	if err := k.patchKubeadmConfig(ctx, func(clusterConfiguration *kubeadm.ClusterConfiguration) {
+		existingSANs := make(map[string]struct{})
+		for _, existingSAN := range clusterConfiguration.APIServer.CertSANs {
+			existingSANs[existingSAN] = struct{}{}
 		}
-		if _, ok := existingSANs[san]; !ok {
-			missingSANs = append(missingSANs, san)
-			existingSANs[san] = struct{}{} // make sure we don't add the same SAN twice
+
+		var missingSANs []string
+		for _, san := range alternativeNames {
+			if san == "" {
+				continue // skip empty SANs
+			}
+			if _, ok := existingSANs[san]; !ok {
+				missingSANs = append(missingSANs, san)
+				existingSANs[san] = struct{}{} // make sure we don't add the same SAN twice
+			}
 		}
-	}
-
-	if len(missingSANs) == 0 {
-		k.log.Debug("No new SANs to add to the cluster's apiserver SAN field")
-		return nil
-	}
-	k.log.Debug("Extending the cluster's apiserver SAN field", "certSANs", strings.Join(missingSANs, ", "))
+		if len(missingSANs) == 0 {
+			k.log.Debug("No new SANs to add to the cluster's apiserver SAN field")
+		}
+		k.log.Debug("Extending the cluster's apiserver SAN field", "certSANs", strings.Join(missingSANs, ", "))

-	clusterConfiguration.APIServer.CertSANs = append(clusterConfiguration.APIServer.CertSANs, missingSANs...)
-	sort.Strings(clusterConfiguration.APIServer.CertSANs)
-
-	newConfigYAML, err := yaml.Marshal(clusterConfiguration)
-	if err != nil {
-		return fmt.Errorf("marshaling ClusterConfiguration: %w", err)
-	}
-
-	kubeadmConfig.Data[constants.ClusterConfigurationKey] = string(newConfigYAML)
-	k.log.Debug("Triggering kubeadm config update now")
-	if err = k.retryAction(ctx, func(ctx context.Context) error {
-		_, err := k.kubectl.UpdateConfigMap(ctx, kubeadmConfig)
-		return err
+		clusterConfiguration.APIServer.CertSANs = append(clusterConfiguration.APIServer.CertSANs, missingSANs...)
+		sort.Strings(clusterConfiguration.APIServer.CertSANs)
 	}); err != nil {
-		return fmt.Errorf("setting new kubeadm config: %w", err)
+		return fmt.Errorf("extending ClusterConfig.CertSANs: %w", err)
 	}

 	k.log.Debug("Successfully extended the cluster's apiserver SAN field")

@@ -316,31 +314,6 @@ func (k *KubeCmd) getConstellationVersion(ctx context.Context) (updatev1alpha1.N
 	return nodeVersion, nil
 }

-// getClusterConfiguration fetches the kubeadm-config configmap from the cluster, extracts the config
-// and returns both the full configmap and the ClusterConfiguration.
-func (k *KubeCmd) getClusterConfiguration(ctx context.Context) (kubeadmv1beta3.ClusterConfiguration, *corev1.ConfigMap, error) {
-	var existingConf *corev1.ConfigMap
-	if err := k.retryAction(ctx, func(ctx context.Context) error {
-		var err error
-		existingConf, err = k.kubectl.GetConfigMap(ctx, constants.ConstellationNamespace, constants.KubeadmConfigMap)
-		return err
-	}); err != nil {
-		return kubeadmv1beta3.ClusterConfiguration{}, nil, fmt.Errorf("retrieving current kubeadm-config: %w", err)
-	}
-
-	clusterConf, ok := existingConf.Data[constants.ClusterConfigurationKey]
-	if !ok {
-		return kubeadmv1beta3.ClusterConfiguration{}, nil, errors.New("ClusterConfiguration missing from kubeadm-config")
-	}
-
-	var existingClusterConfig kubeadmv1beta3.ClusterConfiguration
-	if err := yaml.Unmarshal([]byte(clusterConf), &existingClusterConfig); err != nil {
-		return kubeadmv1beta3.ClusterConfiguration{}, nil, fmt.Errorf("unmarshaling ClusterConfiguration: %w", err)
-	}
-
-	return existingClusterConfig, existingConf, nil
-}
-
 // applyComponentsCM applies the k8s components ConfigMap to the cluster.
 func (k *KubeCmd) applyComponentsCM(ctx context.Context, components *corev1.ConfigMap) error {
 	if err := k.retryAction(ctx, func(ctx context.Context) error {

@@ -468,6 +441,51 @@ func (k *KubeCmd) retryAction(ctx context.Context, action func(ctx context.Conte
 	return retrier.Do(ctx)
 }

+// patchKubeadmConfig fetches and unpacks the kube-system/kubeadm-config ClusterConfiguration entry,
+// runs doPatch on it and uploads the result.
+func (k *KubeCmd) patchKubeadmConfig(ctx context.Context, doPatch func(*kubeadm.ClusterConfiguration)) error {
+	var kubeadmConfig *corev1.ConfigMap
+	if err := k.retryAction(ctx, func(ctx context.Context) error {
+		var err error
+		kubeadmConfig, err = k.kubectl.GetConfigMap(ctx, constants.ConstellationNamespace, constants.KubeadmConfigMap)
+		return err
+	}); err != nil {
+		return fmt.Errorf("retrieving current kubeadm-config: %w", err)
+	}
+
+	clusterConfigData, ok := kubeadmConfig.Data[constants.ClusterConfigurationKey]
+	if !ok {
+		return errors.New("ClusterConfiguration missing from kubeadm-config")
+	}
+
+	var clusterConfiguration kubeadm.ClusterConfiguration
+	if err := runtime.DecodeInto(kubeadmscheme.Codecs.UniversalDecoder(), []byte(clusterConfigData), &clusterConfiguration); err != nil {
+		return fmt.Errorf("decoding cluster configuration data: %w", err)
+	}
+
+	doPatch(&clusterConfiguration)
+
+	opt := k8sjson.SerializerOptions{Yaml: true}
+	serializer := k8sjson.NewSerializerWithOptions(k8sjson.DefaultMetaFactory, kubeadmscheme.Scheme, kubeadmscheme.Scheme, opt)
+	encoder := kubeadmscheme.Codecs.EncoderForVersion(serializer, kubeadmv1beta4.SchemeGroupVersion)
+	newConfigYAML, err := runtime.Encode(encoder, &clusterConfiguration)
+	if err != nil {
+		return fmt.Errorf("marshaling ClusterConfiguration: %w", err)
+	}
+
+	kubeadmConfig.Data[constants.ClusterConfigurationKey] = string(newConfigYAML)
+	k.log.Debug("Triggering kubeadm config update now")
+	if err = k.retryAction(ctx, func(ctx context.Context) error {
+		_, err := k.kubectl.UpdateConfigMap(ctx, kubeadmConfig)
+		return err
+	}); err != nil {
+		return fmt.Errorf("setting new kubeadm config: %w", err)
+	}
+
+	k.log.Debug("Successfully patched the cluster's kubeadm-config")
+	return nil
+}
+
 func checkForApplyError(expected, actual updatev1alpha1.NodeVersion) error {
 	var err error
 	switch {
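patchKubeadmConfig decodes into the internal, version-less kubeadm type and always re-encodes as v1beta4, so a ClusterConfiguration stored as v1beta3 is migrated on its next patch. A standalone sketch of that decode/patch/encode round-trip, assuming k8s.io/kubernetes is available as a module dependency (as it is in this repo); it reuses exactly the calls from the diff above:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	k8sjson "k8s.io/apimachinery/pkg/runtime/serializer/json"
	"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
	kubeadmv1beta4 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta4"
)

const storedConfig = `apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: v1.31.1
`

func main() {
	// Decode into the internal type; the scheme's universal decoder accepts
	// any registered version (v1beta3 or v1beta4).
	var cc kubeadm.ClusterConfiguration
	if err := runtime.DecodeInto(kubeadmscheme.Codecs.UniversalDecoder(), []byte(storedConfig), &cc); err != nil {
		panic(err)
	}

	// Mutate the typed struct, as a doPatch callback would.
	cc.FeatureGates = map[string]bool{"ControlPlaneKubeletLocalMode": true}

	// Re-encode, always as v1beta4, so the stored config converges on the
	// newest schema regardless of the version that was read.
	serializer := k8sjson.NewSerializerWithOptions(k8sjson.DefaultMetaFactory, kubeadmscheme.Scheme, kubeadmscheme.Scheme, k8sjson.SerializerOptions{Yaml: true})
	encoder := kubeadmscheme.Codecs.EncoderForVersion(serializer, kubeadmv1beta4.SchemeGroupVersion)
	newConfigYAML, err := runtime.Encode(encoder, &cc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(newConfigYAML)) // apiVersion: kubeadm.k8s.io/v1beta4 ...
}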
(KubeCmd unit tests, Go source; file name not captured in this mirror)

@@ -281,6 +281,9 @@ func TestUpgradeKubernetesVersion(t *testing.T) {
 			}
 			kubectl := &stubKubectl{
 				unstructuredInterface: unstructuredClient,
+				configMaps: map[string]*corev1.ConfigMap{
+					constants.KubeadmConfigMap: {Data: map[string]string{"ClusterConfiguration": kubeadmClusterConfigurationV1Beta4}},
+				},
 			}
 			if tc.customClientFn != nil {
 				kubectl.unstructuredInterface = tc.customClientFn(nodeVersion)

@@ -676,6 +679,50 @@ func TestRetryAction(t *testing.T) {
 	}
 }

+func TestExtendClusterConfigCertSANs(t *testing.T) {
+	ctx := context.Background()
+
+	testCases := map[string]struct {
+		clusterConfig string
+	}{
+		"kubeadmv1beta3.ClusterConfiguration": {
+			clusterConfig: kubeadmClusterConfigurationV1Beta3,
+		},
+		"kubeadmv1beta4.ClusterConfiguration": {
+			clusterConfig: kubeadmClusterConfigurationV1Beta4,
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			require := require.New(t)
+			assert := assert.New(t)
+			kubectl := &fakeConfigMapClient{
+				configMaps: map[string]*corev1.ConfigMap{
+					constants.KubeadmConfigMap: {Data: map[string]string{"ClusterConfiguration": tc.clusterConfig}},
+				},
+			}
+			cmd := &KubeCmd{
+				kubectl:       kubectl,
+				log:           logger.NewTest(t),
+				retryInterval: time.Millisecond,
+			}
+
+			err := cmd.ExtendClusterConfigCertSANs(ctx, []string{"example.com"})
+			require.NoError(err)
+
+			cm := kubectl.configMaps["kubeadm-config"]
+			require.NotNil(cm)
+			cc := cm.Data["ClusterConfiguration"]
+			require.NotNil(cc)
+			// Verify that SAN was added.
+			assert.Contains(cc, "example.com")
+			// Verify that config was written in v1beta4, regardless of the version read.
+			assert.Contains(cc, "kubeadm.k8s.io/v1beta4")
+		})
+	}
+}
+
 type fakeUnstructuredClient struct {
 	mock.Mock
 }

@@ -835,3 +882,83 @@ func supportedValidK8sVersions() (res []versions.ValidK8sVersion) {
 	}
 	return
 }
+
+var kubeadmClusterConfigurationV1Beta3 = `
+apiVersion: kubeadm.k8s.io/v1beta3
+kind: ClusterConfiguration
+apiServer:
+  certSANs:
+  - 127.0.0.1
+  extraArgs:
+    kubelet-certificate-authority: /etc/kubernetes/pki/ca.crt
+    profiling: "false"
+  extraVolumes:
+  - hostPath: /var/log/kubernetes/audit/
+    mountPath: /var/log/kubernetes/audit/
+    name: audit-log
+    pathType: DirectoryOrCreate
+certificatesDir: /etc/kubernetes/pki
+clusterName: test-55bbf58d
+controlPlaneEndpoint: 34.149.125.227:6443
+controllerManager:
+  extraArgs:
+    cloud-provider: external
+dns:
+  disabled: true
+encryptionAlgorithm: RSA-2048
+etcd:
+  local:
+    dataDir: /var/lib/etcd
+imageRepository: registry.k8s.io
+kubernetesVersion: v1.31.1
+networking:
+  dnsDomain: cluster.local
+  serviceSubnet: 10.96.0.0/12
+proxy:
+  disabled: true
+scheduler:
+  extraArgs:
+    profiling: "false"
+`
+
+var kubeadmClusterConfigurationV1Beta4 = `
+apiVersion: kubeadm.k8s.io/v1beta4
+kind: ClusterConfiguration
+apiServer:
+  certSANs:
+  - 127.0.0.1
+  extraArgs:
+  - name: kubelet-certificate-authority
+    value: /etc/kubernetes/pki/ca.crt
+  - name: profiling
+    value: "false"
+  extraVolumes:
+  - hostPath: /var/log/kubernetes/audit/
+    mountPath: /var/log/kubernetes/audit/
+    name: audit-log
+    pathType: DirectoryOrCreate
+certificatesDir: /etc/kubernetes/pki
+clusterName: test-55bbf58d
+controlPlaneEndpoint: 34.149.125.227:6443
+controllerManager:
+  extraArgs:
+  - name: cloud-provider
+    value: external
+dns:
+  disabled: true
+encryptionAlgorithm: RSA-2048
+etcd:
+  local:
+    dataDir: /var/lib/etcd
+imageRepository: registry.k8s.io
+kubernetesVersion: v1.31.1
+networking:
+  dnsDomain: cluster.local
+  serviceSubnet: 10.96.0.0/12
+proxy:
+  disabled: true
+scheduler:
+  extraArgs:
+  - name: profiling
+    value: "false"
+`
(versions package, Go source; file name not captured in this mirror)

@@ -100,12 +100,12 @@ func ResolveK8sPatchVersion(k8sVersion string) (string, error) {
 // supported patch version as PATCH.
 func k8sVersionFromMajorMinor(version string) string {
 	switch version {
-	case semver.MajorMinor(string(V1_28)):
-		return string(V1_28)
 	case semver.MajorMinor(string(V1_29)):
 		return string(V1_29)
 	case semver.MajorMinor(string(V1_30)):
 		return string(V1_30)
+	case semver.MajorMinor(string(V1_31)):
+		return string(V1_31)
 	default:
 		return ""
 	}
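semver.MajorMinor trims a canonical version down to "vMAJOR.MINOR", so each case arm maps user input like "v1.31" onto the single supported patch release. A standalone sketch with the supported versions inlined as literals (the V1_29..V1_31 constants are package-internal):

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// k8sVersionFromMajorMinor resolves a MAJOR.MINOR string to the one
// supported patch release, or "" if the minor is unsupported.
func k8sVersionFromMajorMinor(version string) string {
	for _, supported := range []string{"v1.29.13", "v1.30.9", "v1.31.1"} {
		if semver.MajorMinor(supported) == version {
			return supported
		}
	}
	return ""
}

func main() {
	fmt.Println(k8sVersionFromMajorMinor("v1.31")) // v1.31.1
	fmt.Println(k8sVersionFromMajorMinor("v1.28")) // "" (dropped in this commit)
}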
@@ -181,14 +181,14 @@ const (

 	// currently supported versions.
-	//nolint:revive
-	V1_28 ValidK8sVersion = "v1.28.15" // renovate:kubernetes-release
 	//nolint:revive
 	V1_29 ValidK8sVersion = "v1.29.13" // renovate:kubernetes-release
 	//nolint:revive
 	V1_30 ValidK8sVersion = "v1.30.9" // renovate:kubernetes-release
+	//nolint:revive
+	V1_31 ValidK8sVersion = "v1.31.1" // renovate:kubernetes-release

 	// Default k8s version deployed by Constellation.
-	Default ValidK8sVersion = V1_29
+	Default ValidK8sVersion = V1_30
 )

 // Regenerate the hashes by running go generate.
@@ -197,73 +197,6 @@ const (

 // VersionConfigs holds download URLs for all required kubernetes components for every supported version.
 var VersionConfigs = map[ValidK8sVersion]KubernetesVersion{
-	V1_28: {
-		ClusterVersion: "v1.28.15", // renovate:kubernetes-release
-		KubernetesComponents: components.Components{
-			{
-				Url:         "https://github.com/containernetworking/plugins/releases/download/v1.6.2/cni-plugins-linux-amd64-v1.6.2.tgz", // renovate:cni-plugins-release
-				Hash:        "sha256:b8e811578fb66023f90d2e238d80cec3bdfca4b44049af74c374d4fae0f9c090",
-				InstallPath: constants.CniPluginsDir,
-				Extract:     true,
-			},
-			{
-				Url:         "https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.32.0/crictl-v1.32.0-linux-amd64.tar.gz", // renovate:crictl-release
-				Hash:        "sha256:f050b71d3a73a91a4e0990b90143ed04dcd100cc66f953736fcb6a2730e283c4",
-				InstallPath: constants.BinDir,
-				Extract:     true,
-			},
-			{
-				Url:         "https://dl.k8s.io/v1.28.15/bin/linux/amd64/kubelet", // renovate:kubernetes-release
-				Hash:        "sha256:b07a27fd5bd2419c9c623de15c1dd339af84eb27e9276c81070071065db00036",
-				InstallPath: constants.KubeletPath,
-				Extract:     false,
-			},
-			{
-				Url:         "https://dl.k8s.io/v1.28.15/bin/linux/amd64/kubeadm", // renovate:kubernetes-release
-				Hash:        "sha256:0555b2c2fd30efcdb44b7fba5460c3dc3d3e39f2301e1eef7894a9f8976e1b4c",
-				InstallPath: constants.KubeadmPath,
-				Extract:     false,
-			},
-			{
-				Url:         "https://dl.k8s.io/v1.28.15/bin/linux/amd64/kubectl", // renovate:kubernetes-release
-				Hash:        "sha256:1f7651ad0b50ef4561aa82e77f3ad06599b5e6b0b2a5fb6c4f474d95a77e41c5",
-				InstallPath: constants.KubectlPath,
-				Extract:     false,
-			},
-			{
-				Url:         "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtYXBpc2VydmVyOnYxLjI4LjE1QHNoYTI1Njo2ZGZhODRmNWQ2YmU3MTFhZTBkMTk3NTgyMDFkMzM3ZTgzNmFiN2RlNzMzMDZmZjE0NzI1Y2VhYTk3OGZlYThmIn1d",
-				InstallPath: patchFilePath("kube-apiserver"),
-			},
-			{
-				Url:         "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtY29udHJvbGxlci1tYW5hZ2VyOnYxLjI4LjE1QHNoYTI1NjpkYWRkMmEzNzg0NzgzMDE4YTdlZTg1ODhkMTFmNzg3ZmVlNGQ1NDI0ZjJjZGQ2Y2U4OWQzYmExODQ0YTZjMTc1In1d",
-				InstallPath: patchFilePath("kube-controller-manager"),
-			},
-			{
-				Url:         "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtc2NoZWR1bGVyOnYxLjI4LjE1QHNoYTI1Njo4MmY0YTQyMzE3NDUwODU4ZDNkNzBmZGU1YjNjMGYyMjE1M2VhMTU1ZmQwNTNmMDk4NjU5OTlhNDY2MWYyZGNhIn1d",
-				InstallPath: patchFilePath("kube-scheduler"),
-			},
-			{
-				Url:         "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2V0Y2Q6My41LjE2LTBAc2hhMjU2OmM2YTlkMTFjYzVjMDRiMTE0Y2NkZWYzOWE5MjY1ZWVlZjgxOGUzZDAyZjUzNTliZTAzNWFlNzg0MDk3ZmRlYzUifV0=",
-				InstallPath: patchFilePath("etcd"),
-			},
-		},
-		// CloudControllerManagerImageAWS is the CCM image used on AWS.
-		// Check for newer versions at https://github.com/kubernetes/cloud-provider-aws/releases.
-		CloudControllerManagerImageAWS: "registry.k8s.io/provider-aws/cloud-controller-manager:v1.28.10@sha256:582571fc487b4ba52fbafe4385daec5d97f57cdd7f3e901211eef6411b2a90a6", // renovate:container
-		// CloudControllerManagerImageAzure is the CCM image used on Azure.
-		// Check for newer versions at https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/README.md.
-		CloudControllerManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-controller-manager:v1.28.14@sha256:6a906998155e3546f7832dd3bd3000654920ff3371a7499e225c549094957303", // renovate:container
-		// CloudNodeManagerImageAzure is the cloud-node-manager image used on Azure.
-		// Check for newer versions at https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/README.md.
-		CloudNodeManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.28.14@sha256:cf9ee6f4affbdd2a602fbf7bfeb69ebfd6de1ec2582c0e65e87bb061385fdbc2", // renovate:container
-		// CloudControllerManagerImageGCP is the CCM image used on GCP.
-		CloudControllerManagerImageGCP: "ghcr.io/edgelesssys/cloud-provider-gcp:v28.10.0@sha256:f3b6fa7faea27b4a303c91b3bc7ee192b050e21e27579e9f3da90ae4ba38e626", // renovate:container
-		// CloudControllerManagerImageOpenStack is the CCM image used on OpenStack.
-		CloudControllerManagerImageOpenStack: "docker.io/k8scloudprovider/openstack-cloud-controller-manager:v1.26.4@sha256:05e846fb13481b6dbe4a1e50491feb219e8f5101af6cf662a086115735624db0", // renovate:container
-		// External service image. Depends on k8s version.
-		// Check for new versions at https://github.com/kubernetes/autoscaler/releases.
-		ClusterAutoscalerImage: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.28.7@sha256:77906954da9171425c8c8d3286091818143b6dcf9039abd49b8f33f1502978a1", // renovate:container
-	},
 	V1_29: {
 		ClusterVersion: "v1.29.13", // renovate:kubernetes-release
 		KubernetesComponents: components.Components{

@@ -398,6 +331,73 @@ var VersionConfigs = map[ValidK8sVersion]KubernetesVersion{
 		// Check for new versions at https://github.com/kubernetes/autoscaler/releases.
 		ClusterAutoscalerImage: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.30.3@sha256:08fd86ee093760849ac4fd579eb90185b669fc20aa56c156aa34ea7b73dd5e34", // renovate:container
 	},
+	V1_31: {
+		ClusterVersion: "v1.31.1", // renovate:kubernetes-release
+		KubernetesComponents: components.Components{
+			{
+				Url:         "https://github.com/containernetworking/plugins/releases/download/v1.6.2/cni-plugins-linux-amd64-v1.6.2.tgz", // renovate:cni-plugins-release
+				Hash:        "sha256:b8e811578fb66023f90d2e238d80cec3bdfca4b44049af74c374d4fae0f9c090",
+				InstallPath: constants.CniPluginsDir,
+				Extract:     true,
+			},
+			{
+				Url:         "https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.31.1/crictl-v1.31.1-linux-amd64.tar.gz", // renovate:crictl-release
+				Hash:        "sha256:0a03ba6b1e4c253d63627f8d210b2ea07675a8712587e697657b236d06d7d231",
+				InstallPath: constants.BinDir,
+				Extract:     true,
+			},
+			{
+				Url:         "https://dl.k8s.io/v1.31.1/bin/linux/amd64/kubelet", // renovate:kubernetes-release
+				Hash:        "sha256:50619fff95bdd7e690c049cc083f495ae0e7c66d0cdf6a8bcad298af5fe28438",
+				InstallPath: constants.KubeletPath,
+				Extract:     false,
+			},
+			{
+				Url:         "https://dl.k8s.io/v1.31.1/bin/linux/amd64/kubeadm", // renovate:kubernetes-release
+				Hash:        "sha256:b3f92d19d482359116dd9ee9c0a10cb86e32a2a2aef79b853d5f07d6a093b0df",
+				InstallPath: constants.KubeadmPath,
+				Extract:     false,
+			},
+			{
+				Url:         "https://dl.k8s.io/v1.31.1/bin/linux/amd64/kubectl", // renovate:kubernetes-release
+				Hash:        "sha256:57b514a7facce4ee62c93b8dc21fda8cf62ef3fed22e44ffc9d167eab843b2ae",
+				InstallPath: constants.KubectlPath,
+				Extract:     false,
+			},
+			{
+				Url:         "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtYXBpc2VydmVyOnYxLjMxLjFAc2hhMjU2OjI0MDljMjNkYmI1YTJiN2E4MWFkYmIxODRkM2VhYzQzYWM2NTNlOWI5N2E3YzBlZTEyMWI4OWJiM2VmNjFmZGIifV0=",
+				InstallPath: patchFilePath("kube-apiserver"),
+			},
+			{
+				Url:         "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtY29udHJvbGxlci1tYW5hZ2VyOnYxLjMxLjFAc2hhMjU2OjlmOWRhNWIyN2UwM2Y4OTU5OWNjNDBiYTg5MTUwYWViZjNiNGNmZjAwMWU2ZGI2ZDk5ODY3NGIzNDE4MWUxYTEifV0=",
+				InstallPath: patchFilePath("kube-controller-manager"),
+			},
+			{
+				Url:         "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtc2NoZWR1bGVyOnYxLjMxLjFAc2hhMjU2Ojk2OWE3ZTk2MzQwZjNhOTI3YjNkNjUyNTgyZWRlYzJkNmQ4MmEwODM4NzFkODFlZjUwNjRiN2VkYWFiNDMwZDAifV0=",
+				InstallPath: patchFilePath("kube-scheduler"),
+			},
+			{
+				Url:         "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2V0Y2Q6My41LjE2LTBAc2hhMjU2OmM2YTlkMTFjYzVjMDRiMTE0Y2NkZWYzOWE5MjY1ZWVlZjgxOGUzZDAyZjUzNTliZTAzNWFlNzg0MDk3ZmRlYzUifV0=",
+				InstallPath: patchFilePath("etcd"),
+			},
+		},
+		// CloudControllerManagerImageAWS is the CCM image used on AWS.
+		// Check for newer versions at https://github.com/kubernetes/cloud-provider-aws/releases.
+		CloudControllerManagerImageAWS: "registry.k8s.io/provider-aws/cloud-controller-manager:v1.31.4@sha256:47f861081efbc04bda32b6212ca2c74b5b2ce190e595a285e1b712ca0afec0c7", // renovate:container
+		// CloudControllerManagerImageAzure is the CCM image used on Azure.
+		// Check for newer versions at https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/README.md.
+		CloudControllerManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-controller-manager:v1.31.1@sha256:b5aa55a7e9d38137f7fcd0adc9335b06e7c96061764addd7e6bb9f86403f0110", // renovate:container
+		// CloudNodeManagerImageAzure is the cloud-node-manager image used on Azure.
+		// Check for newer versions at https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/README.md.
+		CloudNodeManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.31.1@sha256:e9b522399e4ec6bc4ce90c173e59db135d742de7b16f0f5454b4d88ba78a98c7", // renovate:container
+		// CloudControllerManagerImageGCP is the CCM image used on GCP.
+		CloudControllerManagerImageGCP: "ghcr.io/edgelesssys/cloud-provider-gcp:v30.1.0@sha256:64d2d5d4d2b5fb426c307c64ada9a61b64e797b56d9768363f145f2bd957998f", // renovate:container
+		// CloudControllerManagerImageOpenStack is the CCM image used on OpenStack.
+		CloudControllerManagerImageOpenStack: "registry.k8s.io/provider-os/openstack-cloud-controller-manager:v1.31.1@sha256:72cc0d22b83c613df809d8134e50404171513d92287e63e2313d9ad7e1ed630e", // renovate:container
+		// External service image. Depends on k8s version.
+		// Check for new versions at https://github.com/kubernetes/autoscaler/releases.
+		ClusterAutoscalerImage: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.31.0@sha256:6d4c51c35f344d230341d71bb6d35f2c2f0c0a6f205a7887ae44e6d852fb5b5f", // renovate:container
+	},
 }

 // KubernetesVersion bundles download Urls to all version-releated binaries necessary for installing/deploying a particular Kubernetes version.
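The static-pod patches in VersionConfigs ship as data: URLs whose payload is a base64-encoded JSON patch. A standalone sketch decoding the etcd patch from the V1_31 entry above:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	url := "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2V0Y2Q6My41LjE2LTBAc2hhMjU2OmM2YTlkMTFjYzVjMDRiMTE0Y2NkZWYzOWE5MjY1ZWVlZjgxOGUzZDAyZjUzNTliZTAzNWFlNzg0MDk3ZmRlYzUifV0="
	payload, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(url, "data:application/json;base64,"))
	if err != nil {
		panic(err)
	}
	// Prints a JSON patch that pins the etcd static pod image to a digest:
	// [{"op":"replace","path":"/spec/containers/0/image","value":"registry.k8s.io/etcd:3.5.16-0@sha256:c6a9..."}]
	fmt.Println(string(payload))
}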
(node operator NodeVersion reconciler, Go source; file name not captured in this mirror)

@@ -214,7 +214,7 @@ func (r *NodeVersionReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 		return ctrl.Result{Requeue: shouldRequeue}, nil
 	}

-	newNodeConfig := newNodeConfig{desiredNodeVersion, groups.Outdated, pendingNodeList.Items, scalingGroupByID, newNodesBudget}
+	newNodeConfig := newNodeConfig{desiredNodeVersion, groups.Outdated, groups.Donors, pendingNodeList.Items, scalingGroupByID, newNodesBudget}
 	if err := r.createNewNodes(ctx, newNodeConfig); err != nil {
 		logr.Error(err, "Creating new nodes")
 		return ctrl.Result{Requeue: shouldRequeue}, nil

@@ -614,6 +614,15 @@ func (r *NodeVersionReconciler) createNewNodes(ctx context.Context, config newNo
 	if config.newNodesBudget < 1 || len(config.outdatedNodes) == 0 {
 		return nil
 	}
+	// We need to look at both the outdated nodes *and* the nodes that have already
+	// been moved to the donors here because even if a CP node has already been moved to
+	// the donors, we still want to defer worker upgrades until the new CP node is actually joined.
+	hasOutdatedControlPlanes := false
+	for _, entry := range append(config.outdatedNodes, config.donors...) {
+		if nodeutil.IsControlPlaneNode(&entry) {
+			hasOutdatedControlPlanes = true
+		}
+	}
 	outdatedNodesPerScalingGroup := make(map[string]int)
 	for _, node := range config.outdatedNodes {
 		// skip outdated nodes that got assigned an heir in this Reconcile call

@@ -648,6 +657,12 @@ func (r *NodeVersionReconciler) createNewNodes(ctx context.Context, config newNo
 			continue
 		}
 		if requiredNodesPerScalingGroup[scalingGroupID] == 0 {
 			logr.Info("No new nodes needed for scaling group", "scalingGroup", scalingGroupID)
 			continue
 		}
+		// if we are a worker group and still have outdated control planes, we must wait for them to be upgraded.
+		if hasOutdatedControlPlanes && scalingGroup.Spec.Role != updatev1alpha1.ControlPlaneRole {
+			logr.Info("There are still outdated control plane nodes which must be replaced first before this worker scaling group is upgraded", "scalingGroup", scalingGroupID)
+			continue
+		}
 		for {

@@ -679,7 +694,7 @@ func (r *NodeVersionReconciler) createNewNodes(ctx context.Context, config newNo
 			if err := r.Create(ctx, pendingNode); err != nil {
 				return err
 			}
-			logr.Info("Created new node", "createdNode", nodeName, "scalingGroup", scalingGroupID)
+			logr.Info("Created new node", "createdNode", nodeName, "scalingGroup", scalingGroupID, "requiredNodes", requiredNodesPerScalingGroup[scalingGroupID])
 			requiredNodesPerScalingGroup[scalingGroupID]--
 			config.newNodesBudget--
 		}

@@ -939,6 +954,7 @@ type kubernetesServerVersionGetter interface {
 type newNodeConfig struct {
 	desiredNodeVersion updatev1alpha1.NodeVersion
 	outdatedNodes      []corev1.Node
+	donors             []corev1.Node
 	pendingNodes       []updatev1alpha1.PendingNode
 	scalingGroupByID   map[string]updatev1alpha1.ScalingGroup
 	newNodesBudget     int
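The deferral rule needs only two inputs: whether any outdated or donor node carries the control-plane role, and the role of the scaling group at hand. A minimal sketch of that predicate, assuming control-plane nodes are marked with the well-known node-role.kubernetes.io/control-plane label (as in the test fixtures below); isControlPlaneNode stands in for the operator's internal nodeutil.IsControlPlaneNode:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// isControlPlaneNode checks the well-known control-plane role label.
// (Sketch; the real helper lives in the operator's nodeutil package.)
func isControlPlaneNode(n *corev1.Node) bool {
	_, ok := n.Labels["node-role.kubernetes.io/control-plane"]
	return ok
}

// workersMustWait reports whether worker scaling groups should defer their
// upgrade: true while any outdated or donor node is a control-plane node.
func workersMustWait(outdated, donors []corev1.Node) bool {
	for _, n := range append(outdated, donors...) {
		if isControlPlaneNode(&n) {
			return true
		}
	}
	return false
}

func main() {
	cp := corev1.Node{}
	cp.Labels = map[string]string{"node-role.kubernetes.io/control-plane": ""}
	fmt.Println(workersMustWait([]corev1.Node{cp}, nil)) // true
	fmt.Println(workersMustWait(nil, nil))               // false
}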
(node operator reconciler unit tests, Go source; file name not captured in this mirror)

@@ -330,6 +330,7 @@ func TestMatchDonorsAndHeirs(t *testing.T) {
 func TestCreateNewNodes(t *testing.T) {
 	testCases := map[string]struct {
 		outdatedNodes    []corev1.Node
+		donors           []corev1.Node
 		pendingNodes     []updatev1alpha1.PendingNode
 		scalingGroupByID map[string]updatev1alpha1.ScalingGroup
 		budget           int

@@ -573,6 +574,105 @@ func TestCreateNewNodes(t *testing.T) {
 			},
 			budget: 1,
 		},
+		"control plane node upgraded first": {
+			outdatedNodes: []corev1.Node{
+				// CP node
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "control-plane-node",
+						Annotations: map[string]string{
+							scalingGroupAnnotation: "control-plane-scaling-group",
+						},
+						Labels: map[string]string{
+							// Mark this as a CP node as per
+							// https://kubernetes.io/docs/reference/labels-annotations-taints/#node-role-kubernetes-io-control-plane
+							"node-role.kubernetes.io/control-plane": "",
+						},
+					},
+				},
+				// Worker node
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "node",
+						Annotations: map[string]string{
+							scalingGroupAnnotation: "scaling-group",
+						},
+					},
+				},
+			},
+			scalingGroupByID: map[string]updatev1alpha1.ScalingGroup{
+				"scaling-group": {
+					Spec: updatev1alpha1.ScalingGroupSpec{
+						GroupID: "scaling-group",
+						Role:    updatev1alpha1.WorkerRole,
+					},
+					Status: updatev1alpha1.ScalingGroupStatus{
+						ImageReference: "image",
+					},
+				},
+				"control-plane-scaling-group": {
+					Spec: updatev1alpha1.ScalingGroupSpec{
+						GroupID: "control-plane-scaling-group",
+						Role:    updatev1alpha1.ControlPlaneRole,
+					},
+					Status: updatev1alpha1.ScalingGroupStatus{
+						ImageReference: "image",
+					},
+				},
+			},
+			budget:          2,
+			wantCreateCalls: []string{"control-plane-scaling-group"},
+		},
+		"worker not upgraded while cp is in donors": {
+			donors: []corev1.Node{
+				// CP node
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "control-plane-node",
+						Annotations: map[string]string{
+							scalingGroupAnnotation: "control-plane-scaling-group",
+						},
+						Labels: map[string]string{
+							// Mark this as a CP node as per
+							// https://kubernetes.io/docs/reference/labels-annotations-taints/#node-role-kubernetes-io-control-plane
+							"node-role.kubernetes.io/control-plane": "",
+						},
+					},
+				},
+			},
+			outdatedNodes: []corev1.Node{
+				// Worker node
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "node",
+						Annotations: map[string]string{
+							scalingGroupAnnotation: "scaling-group",
+						},
+					},
+				},
+			},
+			scalingGroupByID: map[string]updatev1alpha1.ScalingGroup{
+				"scaling-group": {
+					Spec: updatev1alpha1.ScalingGroupSpec{
+						GroupID: "scaling-group",
+						Role:    updatev1alpha1.WorkerRole,
+					},
+					Status: updatev1alpha1.ScalingGroupStatus{
+						ImageReference: "image",
+					},
+				},
+				"control-plane-scaling-group": {
+					Spec: updatev1alpha1.ScalingGroupSpec{
+						GroupID: "control-plane-scaling-group",
+						Role:    updatev1alpha1.ControlPlaneRole,
+					},
+					Status: updatev1alpha1.ScalingGroupStatus{
+						ImageReference: "image",
+					},
+				},
+			},
+			budget: 1,
+		},
 	}

 	for name, tc := range testCases {

@@ -592,7 +692,7 @@ func TestCreateNewNodes(t *testing.T) {
 				},
 				Scheme: getScheme(t),
 			}
-			newNodeConfig := newNodeConfig{desiredNodeImage, tc.outdatedNodes, tc.pendingNodes, tc.scalingGroupByID, tc.budget}
+			newNodeConfig := newNodeConfig{desiredNodeImage, tc.outdatedNodes, tc.donors, tc.pendingNodes, tc.scalingGroupByID, tc.budget}
 			err := reconciler.createNewNodes(context.Background(), newNodeConfig)
 			require.NoError(err)
 			assert.Equal(tc.wantCreateCalls, reconciler.nodeReplacer.(*stubNodeReplacerWriter).createCalls)
(docs: Terraform provider reference for the constellation_cluster resource; file name not captured in this mirror)

@@ -69,7 +69,7 @@ resource "constellation_cluster" "azure_example" {
 See the [full list of CSPs](https://docs.edgeless.systems/constellation/overview/clouds) that Constellation supports.

 - `image` (Attributes) Constellation OS Image to use on the nodes. (see [below for nested schema](#nestedatt--image))
 - `init_secret` (String) Secret used for initialization of the cluster.
-- `kubernetes_version` (String) The Kubernetes version to use for the cluster. The supported versions are [v1.28.15 v1.29.13 v1.30.9].
+- `kubernetes_version` (String) The Kubernetes version to use for the cluster. The supported versions are [v1.29.13 v1.30.9 v1.31.1].
 - `master_secret` (String) Hex-encoded 32-byte master secret for the cluster.
 - `master_secret_salt` (String) Hex-encoded 32-byte master secret salt for the cluster.
 - `measurement_salt` (String) Hex-encoded 32-byte measurement salt for the cluster.