cli: upgrade apply --force skips all compatibility checks (#1940)

* use force to skip compatibility and upgrade-in-progress checks

* update doc

* fix tests

* add force check for helm and k8s

* add no-op check

* fix errors.As usage
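
With this change, a single invocation can push through an upgrade that the compatibility and in-progress checks would otherwise block:

constellation upgrade apply --force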
Adrian Stobbe 2023-06-21 15:49:42 +02:00 committed by GitHub
parent c7d12055d1
commit 4546912f11
6 changed files with 95 additions and 39 deletions

cli/internal/cmd/upgradeapply.go

@@ -117,18 +117,21 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, fileHandler file.Hand
 if conf.GetProvider() == cloudprovider.Azure || conf.GetProvider() == cloudprovider.GCP || conf.GetProvider() == cloudprovider.AWS {
 err = u.handleServiceUpgrade(cmd, conf, flags)
 upgradeErr := &compatibility.InvalidUpgradeError{}
+noUpgradeRequiredError := &helm.NoUpgradeRequiredError{}
 switch {
-case errors.As(err, &upgradeErr):
+case errors.As(err, upgradeErr):
 cmd.PrintErrln(err)
+case errors.As(err, noUpgradeRequiredError):
+cmd.PrintErrln(err)
 case err != nil:
 return fmt.Errorf("upgrading services: %w", err)
 }
-err = u.upgrader.UpgradeNodeVersion(cmd.Context(), conf)
+err = u.upgrader.UpgradeNodeVersion(cmd.Context(), conf, flags.force)
 switch {
 case errors.Is(err, kubernetes.ErrInProgress):
 cmd.PrintErrln("Skipping image and Kubernetes upgrades. Another upgrade is in progress.")
-case errors.As(err, &upgradeErr):
+case errors.As(err, upgradeErr):
 cmd.PrintErrln(err)
 case err != nil:
 return fmt.Errorf("upgrading NodeVersion: %w", err)
@@ -348,7 +351,7 @@ func (u *upgradeApplyCmd) upgradeAttestConfigIfDiff(cmd *cobra.Command, newConfi
 }
 func (u *upgradeApplyCmd) handleServiceUpgrade(cmd *cobra.Command, conf *config.Config, flags upgradeApplyFlags) error {
-err := u.upgrader.UpgradeHelmServices(cmd.Context(), conf, flags.upgradeTimeout, helm.DenyDestructive)
+err := u.upgrader.UpgradeHelmServices(cmd.Context(), conf, flags.upgradeTimeout, helm.DenyDestructive, flags.force)
 if errors.Is(err, helm.ErrConfirmationMissing) {
 if !flags.yes {
 cmd.PrintErrln("WARNING: Upgrading cert-manager will destroy all custom resources you have manually created that are based on the current version of cert-manager.")
@@ -361,7 +364,7 @@ func (u *upgradeApplyCmd) handleServiceUpgrade(cmd *cobra.Command, conf *config.
 return nil
 }
 }
-err = u.upgrader.UpgradeHelmServices(cmd.Context(), conf, flags.upgradeTimeout, helm.AllowDestructive)
+err = u.upgrader.UpgradeHelmServices(cmd.Context(), conf, flags.upgradeTimeout, helm.AllowDestructive, flags.force)
 }
 return err
@@ -415,8 +418,8 @@ type upgradeApplyFlags struct {
 }
 type cloudUpgrader interface {
-UpgradeNodeVersion(ctx context.Context, conf *config.Config) error
-UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error
+UpgradeNodeVersion(ctx context.Context, conf *config.Config, force bool) error
+UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool, force bool) error
 UpdateAttestationConfig(ctx context.Context, newConfig config.AttestationCfg) error
 GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, *corev1.ConfigMap, error)
 PlanTerraformMigrations(ctx context.Context, opts upgrade.TerraformUpgradeOptions) (bool, error)
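
The errors.As change above is subtle: upgradeErr is already a pointer, so wrapping it in another & makes the target a pointer-to-pointer, which matches a different type in the error chain. A minimal, runnable sketch with a stand-in error type (value-receiver Error method, wrapped as a value, the same style as the NoUpgradeRequiredError added in this commit; whether the extra & is needed in the real code depends on how the wrapped error is defined):

package main

import (
	"errors"
	"fmt"
)

// InvalidUpgradeError is a stand-in with a value-receiver Error method,
// wrapped below as a value (not a pointer).
type InvalidUpgradeError struct{}

func (e InvalidUpgradeError) Error() string { return "invalid upgrade" }

func main() {
	err := fmt.Errorf("upgrading services: %w", InvalidUpgradeError{})

	// Target *InvalidUpgradeError: errors.As finds the wrapped value.
	target := &InvalidUpgradeError{}
	fmt.Println(errors.As(err, target)) // true

	// Target **InvalidUpgradeError: this would only match a *InvalidUpgradeError
	// in the chain, so it misses the value wrapped above.
	fmt.Println(errors.As(err, &target)) // false
}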

cli/internal/cmd/upgradeapply_test.go

@@ -164,11 +164,11 @@ type stubUpgrader struct {
 cleanTerraformErr error
 }
-func (u stubUpgrader) UpgradeNodeVersion(_ context.Context, _ *config.Config) error {
+func (u stubUpgrader) UpgradeNodeVersion(_ context.Context, _ *config.Config, _ bool) error {
 return u.nodeVersionErr
 }
-func (u stubUpgrader) UpgradeHelmServices(_ context.Context, _ *config.Config, _ time.Duration, _ bool) error {
+func (u stubUpgrader) UpgradeHelmServices(_ context.Context, _ *config.Config, _ time.Duration, _, _ bool) error {
 return u.helmErr
 }
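
Because cloudUpgrader is an interface, the test stubs must track every signature change in lockstep. A compile-time assertion (a common Go idiom, shown here as a suggestion, not part of this commit) would surface a stale stub as a build error at the declaration instead of at each use site:

// Assert at compile time that the stub still satisfies the interface.
var _ cloudUpgrader = stubUpgrader{}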

cli/internal/helm/client.go

@@ -75,7 +75,7 @@ func NewClient(client crdClient, kubeConfigPath, helmNamespace string, log debug
 return &Client{kubectl: client, fs: fileHandler, actions: actions{config: actionConfig}, log: log}, nil
 }
-func (c *Client) shouldUpgrade(releaseName, newVersion string) error {
+func (c *Client) shouldUpgrade(releaseName, newVersion string, force bool) error {
 currentVersion, err := c.currentVersion(releaseName)
 if err != nil {
 return fmt.Errorf("getting version for %s: %w", releaseName, err)
@@ -85,8 +85,14 @@ func (c *Client) shouldUpgrade(releaseName, newVersion string) error {
 // This may break for cert-manager or cilium if we decide to upgrade more than one minor version at a time.
 // Leaving it as is since it is not clear to me what kind of sanity check we could do.
-if err := compatibility.IsValidUpgrade(currentVersion, newVersion); err != nil {
-return err
+if currentVersion == newVersion {
+return NoUpgradeRequiredError{}
+}
+if !force {
+if err := compatibility.IsValidUpgrade(currentVersion, newVersion); err != nil {
+return err
+}
 }
 // at this point we conclude that the release should be upgraded. check that this CLI supports the upgrade.
 if releaseName == constellationOperatorsInfo.releaseName || releaseName == constellationServicesInfo.releaseName {
@@ -99,10 +105,17 @@ func (c *Client) shouldUpgrade(releaseName, newVersion string) error {
 return nil
 }
+// NoUpgradeRequiredError is returned if the current version is the same as the target version.
+type NoUpgradeRequiredError struct{}
+
+func (e NoUpgradeRequiredError) Error() string {
+return "no upgrade required since current version is the same as the target version"
+}
 // Upgrade runs a helm-upgrade on all deployments that are managed via Helm.
 // If the CLI receives an interrupt signal it will cancel the context.
 // Canceling the context will prompt helm to abort and roll back the ongoing upgrade.
-func (c *Client) Upgrade(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool, upgradeID string) error {
+func (c *Client) Upgrade(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive, force bool, upgradeID string) error {
 upgradeErrs := []error{}
 upgradeReleases := []*chart.Chart{}
 invalidUpgrade := &compatibility.InvalidUpgradeError{}
@@ -123,10 +136,13 @@ func (c *Client) Upgrade(ctx context.Context, config *config.Config, timeout tim
 upgradeVersion = chart.Metadata.Version
 }
-err = c.shouldUpgrade(info.releaseName, upgradeVersion)
+err = c.shouldUpgrade(info.releaseName, upgradeVersion, force)
+noUpgradeRequired := &NoUpgradeRequiredError{}
 switch {
 case errors.As(err, &invalidUpgrade):
 upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping %s upgrade: %w", info.releaseName, err))
+case errors.As(err, &noUpgradeRequired):
+upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping %s upgrade: %w", info.releaseName, err))
 case err != nil:
 return fmt.Errorf("should upgrade %s: %w", info.releaseName, err)
 case err == nil:
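
Note the ordering in the new shouldUpgrade: the no-op check runs before the force gate, so a release already at the target version is skipped even under --force; force only bypasses the compatibility validation. A runnable sketch of that ordering, where isValidUpgrade is a simplified stand-in for compatibility.IsValidUpgrade (it only rejects downgrades; the real check is stricter):

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// NoUpgradeRequiredError mirrors the error type added in this commit.
type NoUpgradeRequiredError struct{}

func (e NoUpgradeRequiredError) Error() string {
	return "no upgrade required since current version is the same as the target version"
}

// isValidUpgrade is a simplified stand-in for compatibility.IsValidUpgrade.
func isValidUpgrade(current, target string) error {
	if semver.Compare(target, current) < 0 {
		return fmt.Errorf("downgrading from %s to %s is not allowed", current, target)
	}
	return nil
}

func shouldUpgrade(current, target string, force bool) error {
	// The no-op check is intentionally outside the force gate.
	if current == target {
		return NoUpgradeRequiredError{}
	}
	if !force {
		return isValidUpgrade(current, target)
	}
	return nil
}

func main() {
	fmt.Println(shouldUpgrade("v1.10.0", "v1.10.0", true)) // no upgrade required, even with force
	fmt.Println(shouldUpgrade("v1.10.0", "v1.9.0", false)) // downgrading ... is not allowed
	fmt.Println(shouldUpgrade("v1.10.0", "v1.9.0", true))  // <nil>: force skips the validation
}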

cli/internal/helm/client_test.go

@@ -48,7 +48,7 @@ func TestShouldUpgrade(t *testing.T) {
 chart, err := loadChartsDir(helmFS, certManagerInfo.path)
 require.NoError(err)
-err = client.shouldUpgrade(certManagerInfo.releaseName, chart.Metadata.Version)
+err = client.shouldUpgrade(certManagerInfo.releaseName, chart.Metadata.Version, false)
 if tc.wantError {
 tc.assertCorrectError(t, err)
 return

cli/internal/kubernetes/upgrade.go

@@ -182,13 +182,13 @@ func (u *Upgrader) ApplyTerraformMigrations(ctx context.Context, fileHandler fil
 }
 // UpgradeHelmServices upgrade helm services.
-func (u *Upgrader) UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error {
-return u.helmClient.Upgrade(ctx, config, timeout, allowDestructive, u.upgradeID)
+func (u *Upgrader) UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool, force bool) error {
+return u.helmClient.Upgrade(ctx, config, timeout, allowDestructive, force, u.upgradeID)
 }
 // UpgradeNodeVersion upgrades the cluster's NodeVersion object and in turn triggers image & k8s version upgrades.
 // The versions set in the config are validated against the versions running in the cluster.
-func (u *Upgrader) UpgradeNodeVersion(ctx context.Context, conf *config.Config) error {
+func (u *Upgrader) UpgradeNodeVersion(ctx context.Context, conf *config.Config, force bool) error {
 provider := conf.GetProvider()
 attestationVariant := conf.GetAttestationConfig().GetVariant()
 region := conf.GetRegion()
@@ -202,14 +202,15 @@ func (u *Upgrader) UpgradeNodeVersion(ctx context.Context, conf *config.Config)
 return fmt.Errorf("parsing version from image short path: %w", err)
 }
-nodeVersion, err := u.checkClusterStatus(ctx)
+nodeVersion, err := u.getClusterStatus(ctx)
 if err != nil {
 return err
 }
 upgradeErrs := []error{}
 upgradeErr := &compatibility.InvalidUpgradeError{}
-err = u.updateImage(&nodeVersion, imageReference, imageVersion.Version)
+err = u.updateImage(&nodeVersion, imageReference, imageVersion.Version, force)
 switch {
 case errors.As(err, &upgradeErr):
 upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping image upgrades: %w", err))
@@ -226,7 +227,7 @@ func (u *Upgrader) UpgradeNodeVersion(ctx context.Context, conf *config.Config)
 err = compatibility.NewInvalidUpgradeError(nodeVersion.Spec.KubernetesClusterVersion, conf.KubernetesVersion, innerErr)
 } else {
 versionConfig := versions.VersionConfigs[currentK8sVersion]
-components, err = u.updateK8s(&nodeVersion, versionConfig.ClusterVersion, versionConfig.KubernetesComponents)
+components, err = u.updateK8s(&nodeVersion, versionConfig.ClusterVersion, versionConfig.KubernetesComponents, force)
 }
 switch {
@@ -379,27 +380,26 @@ func (u *Upgrader) applyNodeVersion(ctx context.Context, nodeVersion updatev1alp
 return updatedNodeVersion, nil
 }
-func (u *Upgrader) checkClusterStatus(ctx context.Context) (updatev1alpha1.NodeVersion, error) {
+func (u *Upgrader) getClusterStatus(ctx context.Context) (updatev1alpha1.NodeVersion, error) {
 nodeVersion, err := GetConstellationVersion(ctx, u.dynamicInterface)
 if err != nil {
 return updatev1alpha1.NodeVersion{}, fmt.Errorf("retrieving current image: %w", err)
 }
-if upgradeInProgress(nodeVersion) {
-return updatev1alpha1.NodeVersion{}, ErrInProgress
-}
 return nodeVersion, nil
 }
 // updateImage upgrades the cluster to the given measurements and image.
-func (u *Upgrader) updateImage(nodeVersion *updatev1alpha1.NodeVersion, newImageReference, newImageVersion string) error {
+func (u *Upgrader) updateImage(nodeVersion *updatev1alpha1.NodeVersion, newImageReference, newImageVersion string, force bool) error {
 currentImageVersion := nodeVersion.Spec.ImageVersion
-if err := compatibility.IsValidUpgrade(currentImageVersion, newImageVersion); err != nil {
-return err
+if !force {
+if upgradeInProgress(*nodeVersion) {
+return ErrInProgress
+}
+if err := compatibility.IsValidUpgrade(currentImageVersion, newImageVersion); err != nil {
+return fmt.Errorf("validating image update: %w", err)
+}
 }
 u.log.Debugf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, newImageVersion)
 nodeVersion.Spec.ImageReference = newImageReference
 nodeVersion.Spec.ImageVersion = newImageVersion
@@ -407,14 +407,15 @@ func (u *Upgrader) updateImage(nodeVersion *updatev1alpha1.NodeVersion, newImage
 return nil
 }
-func (u *Upgrader) updateK8s(nodeVersion *updatev1alpha1.NodeVersion, newClusterVersion string, components components.Components) (*corev1.ConfigMap, error) {
+func (u *Upgrader) updateK8s(nodeVersion *updatev1alpha1.NodeVersion, newClusterVersion string, components components.Components, force bool) (*corev1.ConfigMap, error) {
 configMap, err := internalk8s.ConstructK8sComponentsCM(components, newClusterVersion)
 if err != nil {
 return nil, fmt.Errorf("constructing k8s-components ConfigMap: %w", err)
 }
-if err := compatibility.IsValidUpgrade(nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion); err != nil {
-return nil, err
+if !force {
+if err := compatibility.IsValidUpgrade(nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion); err != nil {
+return nil, err
+}
 }
 u.log.Debugf("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)
@@ -549,7 +550,7 @@ func joinConfigMigration(existingConf *corev1.ConfigMap, attestVariant variant.V
 }
 type helmInterface interface {
-Upgrade(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool, upgradeID string) error
+Upgrade(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive, force bool, upgradeID string) error
 }
 type debugLog interface {
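
The in-progress check moved from the former checkClusterStatus into updateImage, where it sits behind the same force gate as the version validation; getClusterStatus is now a plain read. A condensed, self-contained sketch of the resulting control flow (NodeVersion reduced to the fields used here, and a naive string comparison standing in for compatibility.IsValidUpgrade):

package main

import (
	"errors"
	"fmt"
)

// ErrInProgress mirrors the package's sentinel for a running upgrade.
var ErrInProgress = errors.New("upgrade in progress")

// nodeVersion is a stand-in for the relevant fields of updatev1alpha1.NodeVersion.
type nodeVersion struct {
	ImageReference string
	ImageVersion   string
	inProgress     bool
}

// updateImage sketches the new gating: without force, a running upgrade or a
// failed validation aborts; with force, the update is applied unconditionally.
func updateImage(nv *nodeVersion, newRef, newVersion string, force bool) error {
	if !force {
		if nv.inProgress {
			return ErrInProgress
		}
		// Stand-in for compatibility.IsValidUpgrade(nv.ImageVersion, newVersion).
		if newVersion <= nv.ImageVersion {
			return fmt.Errorf("validating image update: %s is not newer than %s", newVersion, nv.ImageVersion)
		}
	}
	nv.ImageReference = newRef
	nv.ImageVersion = newVersion
	return nil
}

func main() {
	nv := nodeVersion{ImageVersion: "v2.8.0", inProgress: true}
	fmt.Println(updateImage(&nv, "ref/v2.8.1", "v2.8.1", false)) // upgrade in progress
	fmt.Println(updateImage(&nv, "ref/v2.8.1", "v2.8.1", true))  // <nil>: force bypasses both checks
	fmt.Println(nv.ImageVersion)                                 // v2.8.1
}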

cli/internal/kubernetes/upgrade_test.go

@@ -41,6 +41,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
 badImageVersion string
 currentClusterVersion string
 conf *config.Config
+force bool
 getErr error
 wantErr bool
 wantUpdate bool
@@ -139,6 +140,23 @@ func TestUpgradeNodeVersion(t *testing.T) {
 return assert.ErrorIs(t, err, ErrInProgress)
 },
 },
+"success with force and upgrade in progress": {
+conf: func() *config.Config {
+conf := config.Default()
+conf.Image = "v1.2.3"
+conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
+return conf
+}(),
+conditions: []metav1.Condition{{
+Type: updatev1alpha1.ConditionOutdated,
+Status: metav1.ConditionTrue,
+}},
+currentImageVersion: "v1.2.2",
+currentClusterVersion: versions.SupportedK8sVersions()[0],
+stable: &stubStableClient{},
+force: true,
+wantUpdate: true,
+},
 "get error": {
 conf: func() *config.Config {
 conf := config.Default()
@@ -173,6 +191,24 @@ func TestUpgradeNodeVersion(t *testing.T) {
 return assert.ErrorAs(t, err, &upgradeErr)
 },
 },
+"success with force and image too new": {
+conf: func() *config.Config {
+conf := config.Default()
+conf.Image = "v1.4.2"
+conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
+return conf
+}(),
+newImageReference: "path/to/image:v1.4.2",
+currentImageVersion: "v1.2.2",
+currentClusterVersion: versions.SupportedK8sVersions()[0],
+stable: &stubStableClient{
+configMaps: map[string]*corev1.ConfigMap{
+constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+},
+},
+wantUpdate: true,
+force: true,
+},
 "apply returns bad object": {
 conf: func() *config.Config {
 conf := config.Default()
@@ -253,7 +289,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
 outWriter: io.Discard,
 }
-err = upgrader.UpgradeNodeVersion(context.Background(), tc.conf)
+err = upgrader.UpgradeNodeVersion(context.Background(), tc.conf, tc.force)
 // Check upgrades first because if we checked err first, UpgradeImage may error due to other reasons and still trigger an upgrade.
 if tc.wantUpdate {
@@ -428,7 +464,7 @@ func TestUpdateImage(t *testing.T) {
 },
 }
-err := upgrader.updateImage(&nodeVersion, tc.newImageReference, tc.newImageVersion)
+err := upgrader.updateImage(&nodeVersion, tc.newImageReference, tc.newImageVersion, false)
 if tc.wantErr {
 assert.Error(err)
@@ -486,7 +522,7 @@ func TestUpdateK8s(t *testing.T) {
 },
 }
-_, err := upgrader.updateK8s(&nodeVersion, tc.newClusterVersion, components.Components{})
+_, err := upgrader.updateK8s(&nodeVersion, tc.newClusterVersion, components.Components{}, false)
 if tc.wantErr {
 assert.Error(err)
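
The "image too new" fixture shows what force actually unlocks: v1.2.2 to v1.4.2 skips the v1.3 minor line, which the compatibility check rejects. A sketch of that rule under the assumption (taken from these fixtures, not from the implementation of compatibility.IsValidUpgrade) that an upgrade may advance at most one minor version:

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// isValidMinorStep sketches the assumed rule: an upgrade may advance at most
// one minor version. semver.MajorMinor("v1.2.2") returns "v1.2".
func isValidMinorStep(current, target string) bool {
	var curMaj, curMin, tgtMaj, tgtMin int
	fmt.Sscanf(semver.MajorMinor(current), "v%d.%d", &curMaj, &curMin)
	fmt.Sscanf(semver.MajorMinor(target), "v%d.%d", &tgtMaj, &tgtMin)
	return curMaj == tgtMaj && (tgtMin == curMin || tgtMin == curMin+1)
}

func main() {
	fmt.Println(isValidMinorStep("v1.2.2", "v1.3.0")) // true
	fmt.Println(isValidMinorStep("v1.2.2", "v1.4.2")) // false: passes only with force
}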