Mirror of https://github.com/edgelesssys/constellation.git
Fix Konnectivity migration (#2633)
* helm: let cilium upgrade jump minor versions
* cli: reconcile kubeadm cm to not have konnectivity
Parent: 949186e5d7
Commit: 635a5d2c0a
5 changed files with 165 additions and 18 deletions
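
Background for the second bullet of the commit message above: kubeadm stores its ClusterConfiguration as YAML under the ClusterConfiguration key of the kubeadm-config ConfigMap in kube-system, so removing the Konnectivity remnants comes down to a get-modify-update of that ConfigMap. The following sketch shows that flow with plain client-go and is illustrative only; the actual change (in the diff below) goes through Constellation's own kubectl wrapper, and reconcileKubeadmConfig/mutate here are hypothetical names.

package kubeadmreconcile // hypothetical package, not part of the PR

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// reconcileKubeadmConfig fetches the kubeadm-config ConfigMap, lets the caller
// rewrite the ClusterConfiguration YAML, and writes it back only if it changed.
func reconcileKubeadmConfig(ctx context.Context, client kubernetes.Interface, mutate func(clusterConfigYAML string) (string, error)) error {
	cm, err := client.CoreV1().ConfigMaps("kube-system").Get(ctx, "kubeadm-config", metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("getting kubeadm-config ConfigMap: %w", err)
	}

	oldYAML := cm.Data["ClusterConfiguration"]
	newYAML, err := mutate(oldYAML)
	if err != nil {
		return fmt.Errorf("mutating ClusterConfiguration: %w", err)
	}
	if newYAML == oldYAML {
		return nil // nothing to reconcile
	}

	cm.Data["ClusterConfiguration"] = newYAML
	if _, err := client.CoreV1().ConfigMaps("kube-system").Update(ctx, cm, metav1.UpdateOptions{}); err != nil {
		return fmt.Errorf("updating kubeadm-config ConfigMap: %w", err)
	}
	return nil
}

Comparing old and new YAML before writing mirrors the early return in the diff below and keeps the reconciliation idempotent across repeated upgrade runs.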
@@ -22,6 +22,7 @@ import (
	"errors"
	"fmt"
	"io"
	"slices"
	"sort"
	"strings"
	"time"
@@ -128,6 +129,12 @@ func (k *KubeCmd) UpgradeNodeVersion(ctx context.Context, conf *config.Config, f
	case err != nil:
		return fmt.Errorf("updating image version: %w", err)
	}

	// TODO(3u13r): remove `reconcileKubeadmConfigMap` after v2.14.0 has been released.
	if err := k.reconcileKubeadmConfigMap(ctx); err != nil {
		return fmt.Errorf("reconciling kubeadm config: %w", err)
	}

	k.log.Debugf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, imageVersion.Version())
	nodeVersion.Spec.ImageReference = imageReference
	nodeVersion.Spec.ImageVersion = imageVersion.Version()
@@ -393,6 +400,44 @@ func (k *KubeCmd) applyNodeVersion(ctx context.Context, nodeVersion updatev1alph
	return updatedNodeVersion, err
}

func (k *KubeCmd) reconcileKubeadmConfigMap(ctx context.Context) error {
	clusterConfiguration, kubeadmConfig, err := k.getClusterConfiguration(ctx)
	if err != nil {
		return fmt.Errorf("getting ClusterConfig: %w", err)
	}

	for i, v := range clusterConfiguration.APIServer.ExtraVolumes {
		if v.Name == "konnectivity-uds" {
			clusterConfiguration.APIServer.ExtraVolumes = slices.Delete(clusterConfiguration.APIServer.ExtraVolumes, i, i+1)
		}
	}
	for i, v := range clusterConfiguration.APIServer.ExtraVolumes {
		if v.Name == "egress-config" {
			clusterConfiguration.APIServer.ExtraVolumes = slices.Delete(clusterConfiguration.APIServer.ExtraVolumes, i, i+1)
		}
	}
	delete(clusterConfiguration.APIServer.ExtraArgs, "egress-selector-config-file")

	newConfigYAML, err := yaml.Marshal(clusterConfiguration)
	if err != nil {
		return fmt.Errorf("marshaling ClusterConfiguration: %w", err)
	}

	if kubeadmConfig.Data[constants.ClusterConfigurationKey] == string(newConfigYAML) {
		k.log.Debugf("No changes to kubeadm config required")
		return nil
	}

	kubeadmConfig.Data[constants.ClusterConfigurationKey] = string(newConfigYAML)
	k.log.Debugf("Triggering kubeadm config update now")
	if _, err = k.kubectl.UpdateConfigMap(ctx, kubeadmConfig); err != nil {
		return fmt.Errorf("setting new kubeadm config: %w", err)
	}

	fmt.Fprintln(k.outWriter, "Successfully reconciled the cluster's kubeadm config")
	return nil
}

// isValidImageUpdate checks if the new image version is a valid upgrade, and there is no upgrade already running.
func (k *KubeCmd) isValidImageUpgrade(nodeVersion updatev1alpha1.NodeVersion, newImageVersion string, force bool) error {
	if !force {
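
The two index-based range loops in reconcileKubeadmConfigMap each drop one named API server volume. As a side note, the same cleanup can be written with slices.DeleteFunc from the standard library. The standalone sketch below is not the PR's code: it uses a stand-in struct in place of kubeadm's HostPathMount, an illustrative file path, and an invented audit-log volume only to show that unrelated entries survive.

package main

import (
	"fmt"
	"slices"
)

// hostPathMount stands in for kubeadm's HostPathMount; only Name matters here.
type hostPathMount struct {
	Name string
}

// apiServerConfig mirrors the two fields the migration touches.
type apiServerConfig struct {
	ExtraArgs    map[string]string
	ExtraVolumes []hostPathMount
}

// dropKonnectivityLeftovers removes the Konnectivity volumes and the egress
// selector flag, the same cleanup the diff performs with slices.Delete.
func dropKonnectivityLeftovers(a *apiServerConfig) {
	a.ExtraVolumes = slices.DeleteFunc(a.ExtraVolumes, func(v hostPathMount) bool {
		return v.Name == "konnectivity-uds" || v.Name == "egress-config"
	})
	delete(a.ExtraArgs, "egress-selector-config-file")
}

func main() {
	cfg := &apiServerConfig{
		ExtraArgs: map[string]string{"egress-selector-config-file": "/path/to/egress-selector-config.yaml"}, // value is illustrative
		ExtraVolumes: []hostPathMount{
			{Name: "konnectivity-uds"},
			{Name: "egress-config"},
			{Name: "audit-log"}, // placeholder for an unrelated volume that must survive
		},
	}
	dropKonnectivityLeftovers(cfg)
	fmt.Println(cfg.ExtraVolumes, cfg.ExtraArgs) // prints: [{audit-log}] map[]
}

Besides being shorter, DeleteFunc avoids mutating the slice while ranging over it, which the index-based loops get away with because each volume name occurs at most once.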