Microservice upgrades (#729)

Run with: constellation upgrade execute --helm.
This will only upgrade the helm charts. No config is needed.
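Both flags added below are hidden; for illustration only, a run with a longer timeout could be invoked as (hypothetical invocation, the value is an example):
constellation upgrade execute --helm --timeout 10m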

Upgrades are implemented via helm's upgrade action, i.e. they
automatically roll back if something goes wrong. Releases can
still be managed via helm, even after an upgrade with constellation
has been performed.
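The rollback behaviour described above maps to helm's atomic upgrade option. The following is a minimal sketch under that assumption, using the helm.sh/helm/v3 action package; the helper name upgradeAtomic and its parameters are illustrative and not taken from this commit:

package helmsketch

import (
	"fmt"
	"time"

	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/chart"
)

// upgradeAtomic is a hypothetical helper: it runs a helm upgrade that is
// rolled back automatically if the upgrade fails, mirroring the behaviour
// described above.
func upgradeAtomic(cfg *action.Configuration, release string, chrt *chart.Chart, vals map[string]interface{}, timeout time.Duration) error {
	up := action.NewUpgrade(cfg)
	up.Atomic = true      // roll back to the previous release on failure
	up.Timeout = timeout  // corresponds to the --timeout flag introduced below
	if _, err := up.Run(release, chrt, vals); err != nil {
		return fmt.Errorf("upgrading release %q: %w", release, err)
	}
	return nil
}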

Currently not user facing, as CRD/CR backups are still in progress.
These backups should be created automatically and saved to the
user's disk, since updates may delete CRs. This happens implicitly
through CRD upgrades, which are part of microservice upgrades.
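The backup itself is not implemented in this commit; as a rough sketch of the idea, assuming client-go's dynamic client (the helper name backupCRs and the file layout are hypothetical), the CRs of a resource could be dumped to disk before the upgrade:

import (
	"context"
	"encoding/json"
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// backupCRs is a hypothetical helper: it lists all CRs of the given resource
// and writes them to a file on the user's disk so they can be restored if an
// upgrade deletes them.
func backupCRs(ctx context.Context, client dynamic.Interface, gvr schema.GroupVersionResource, path string) error {
	list, err := client.Resource(gvr).List(ctx, metav1.ListOptions{})
	if err != nil {
		return fmt.Errorf("listing %s: %w", gvr.Resource, err)
	}
	raw, err := json.Marshal(list)
	if err != nil {
		return fmt.Errorf("marshaling backup: %w", err)
	}
	return os.WriteFile(path, raw, 0o600)
}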
Otto Bittner, 2022-12-19 16:52:15 +01:00 (committed via GitHub)
commit efcd0337b4 (parent 990cae58a5)
13 changed files with 498 additions and 166 deletions

@@ -8,6 +8,8 @@ package cmd
import (
"context"
"fmt"
"time"
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
@@ -27,13 +29,28 @@ func newUpgradeExecuteCmd() *cobra.Command {
RunE: runUpgradeExecute,
}
cmd.Flags().Bool("helm", false, "Execute helm upgrade. This feature is still in development and may change without announcement. Upgrades all helm charts deployed during constellation-init.")
cmd.Flags().Duration("timeout", 3*time.Minute, "Change helm upgrade timeout. This feature is still in development and may change without announcement. Might be useful for slow connections or big clusters.")
if err := cmd.Flags().MarkHidden("helm"); err != nil {
panic(err)
}
if err := cmd.Flags().MarkHidden("timeout"); err != nil {
panic(err)
}
return cmd
}
func runUpgradeExecute(cmd *cobra.Command, args []string) error {
log, err := newCLILogger(cmd)
if err != nil {
return fmt.Errorf("creating logger: %w", err)
}
defer log.Sync()
fileHandler := file.NewHandler(afero.NewOsFs())
imageFetcher := image.New()
- upgrader, err := cloudcmd.NewUpgrader(cmd.OutOrStdout())
+ upgrader, err := cloudcmd.NewUpgrader(cmd.OutOrStdout(), log)
if err != nil {
return err
}
@@ -51,6 +68,21 @@ func upgradeExecute(cmd *cobra.Command, imageFetcher imageFetcher, upgrader clou
return displayConfigValidationErrors(cmd.ErrOrStderr(), err)
}
helm, err := cmd.Flags().GetBool("helm")
if err != nil {
return err
}
if helm {
timeout, err := cmd.Flags().GetDuration("timeout")
if err != nil {
return err
}
if err := upgrader.UpgradeHelmServices(cmd.Context(), conf, timeout); err != nil {
return fmt.Errorf("upgrading helm: %w", err)
}
return nil
}
// TODO: validate upgrade config? Should be basic things like checking image is not an empty string
// More sophisticated validation, like making sure we don't downgrade the cluster, should be done by `constellation upgrade plan`
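Not part of this diff: the cloudUpgrader interface extended in the hunk below is what keeps upgradeExecute unit-testable. A minimal sketch of a test stub satisfying it, with hypothetical names:

// stubUpgrader is a hypothetical test double for the cloudUpgrader interface
// shown below; it only records whether the helm path was taken.
type stubUpgrader struct {
	helmErr    error
	helmCalled bool
}

func (s *stubUpgrader) Upgrade(ctx context.Context, imageReference, imageVersion string, m measurements.M) error {
	return nil
}

func (s *stubUpgrader) UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration) error {
	s.helmCalled = true
	return s.helmErr
}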
@@ -66,6 +98,7 @@ func upgradeExecute(cmd *cobra.Command, imageFetcher imageFetcher, upgrader clou
type cloudUpgrader interface {
Upgrade(ctx context.Context, imageReference, imageVersion string, measurements measurements.M) error
UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration) error
}
type imageFetcher interface {