// cli/internal/cmd/upgradecheck.go
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package cmd
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"path/filepath"
"sort"
"strings"
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
"github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
"github.com/edgelesssys/constellation/v2/internal/api/fetcher"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/compatibility"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/constellation/featureset"
"github.com/edgelesssys/constellation/v2/internal/constellation/helm"
"github.com/edgelesssys/constellation/v2/internal/constellation/kubecmd"
"github.com/edgelesssys/constellation/v2/internal/file"
consemver "github.com/edgelesssys/constellation/v2/internal/semver"
"github.com/edgelesssys/constellation/v2/internal/sigstore"
"github.com/edgelesssys/constellation/v2/internal/sigstore/keyselect"
"github.com/edgelesssys/constellation/v2/internal/versions"
"github.com/siderolabs/talos/pkg/machinery/config/encoder"
"github.com/spf13/afero"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"golang.org/x/mod/semver"
)
// newUpgradeCheckCmd returns the cobra command for "upgrade check", which
// reports the upgrades that can be applied to a Constellation cluster.
func newUpgradeCheckCmd() *cobra.Command {
	checkCmd := &cobra.Command{
		Use:   "check",
		Short: "Check for possible upgrades",
		Long:  "Check which upgrades can be applied to your Constellation Cluster.",
		Args:  cobra.NoArgs,
		RunE:  runUpgradeCheck,
	}

	checkCmd.Flags().BoolP("update-config", "u", false, "update the specified config file with the suggested versions")
	checkCmd.Flags().String("ref", versionsapi.ReleaseRef, "the reference to use for querying new versions")
	checkCmd.Flags().String("stream", "stable", "the stream to use for querying new versions")

	return checkCmd
}
// upgradeCheckFlags holds the parsed flags of the "upgrade check" command.
type upgradeCheckFlags struct {
	rootFlags
	updateConfig bool   // write the suggested versions back to the config file
	ref          string // versions API ref to query for new versions
	stream       string // versions API stream to query for new versions
}
// parse reads the upgrade-check flags from the given flag set, after first
// parsing the shared root flags. An error is returned if any flag is missing
// or of the wrong type.
func (f *upgradeCheckFlags) parse(flags *pflag.FlagSet) error {
	if err := f.rootFlags.parse(flags); err != nil {
		return err
	}

	var err error
	if f.updateConfig, err = flags.GetBool("update-config"); err != nil {
		return fmt.Errorf("getting 'update-config' flag: %w", err)
	}
	if f.ref, err = flags.GetString("ref"); err != nil {
		return fmt.Errorf("getting 'ref' flag: %w", err)
	}
	if f.stream, err = flags.GetString("stream"); err != nil {
		return fmt.Errorf("getting 'stream' flag: %w", err)
	}
	return nil
}
func runUpgradeCheck(cmd *cobra.Command, _ []string) error {
log, err := newCLILogger(cmd)
if err != nil {
return fmt.Errorf("creating logger: %w", err)
}
defer log.Sync()
var flags upgradeCheckFlags
if err := flags.parse(cmd.Flags()); err != nil {
return err
}
aws: use new LB controller to fix SecurityGroup cleanup on K8s service deletion (#2090) * add current chart add current helm chart * disable service controller for aws ccm * add new iam roles * doc AWS internet LB + add to LB test * pass clusterName to helm for AWS LB * fix update-aws-lb chart to also include .helmignore * move chart outside services * working state * add subnet tags for AWS subnet discovery * fix .helmignore load rule with file in subdirectory * upgrade iam profile * revert new loader impl since cilium is not correctly loaded * install chart if not already present during `upgrade apply` * cleanup PR + fix build + add todos cleanup PR + add todos * shared helm pkg for cli install and bootstrapper * add link to eks docs * refactor iamMigrationCmd * delete unused helm.symwallk * move iammigrate to upgrade pkg * fixup! delete unused helm.symwallk * add to upgradecheck * remove nodeSelector from go code (Otto) * update iam docs and sort permission + remove duplicate roles * fix bug in `upgrade check` * better upgrade check output when svc version upgrade not possible * pr feedback * remove force flag in upgrade_test * use upgrader.GetUpgradeID instead of extra type * remove todos + fix check * update doc lb (leo) * remove bootstrapper helm package * Update cli/internal/cmd/upgradecheck.go Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com> * final nits * add docs for e2e upgrade test setup * Apply suggestions from code review Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com> * Update cli/internal/helm/loader.go Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com> * Update cli/internal/cmd/tfmigrationclient.go Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com> * fix daniel review * link to the iam permissions instead of manually updating them (agreed with leo) * disable iam upgrade in upgrade apply --------- Co-authored-by: Daniel Weiße 
<66256922+daniel-weisse@users.noreply.github.com> Co-authored-by: Malte Poll
2023-07-24 04:30:53 -04:00
fileHandler := file.NewHandler(afero.NewOsFs())
upgradeID := generateUpgradeID(upgradeCmdKindCheck)
upgradeDir := filepath.Join(constants.UpgradeDir, upgradeID)
tfClient, cleanUp, err := cloudcmd.NewApplier(
cmd.Context(),
cmd.OutOrStdout(),
constants.TerraformWorkingDir,
upgradeDir,
flags.tfLogLevel,
fileHandler,
)
if err != nil {
return fmt.Errorf("setting up Terraform upgrader: %w", err)
}
defer cleanUp()
kubeConfig, err := fileHandler.Read(constants.AdminConfFilename)
if err != nil {
return fmt.Errorf("reading kubeconfig: %w", err)
}
kubeChecker, err := kubecmd.New(kubeConfig, log)
if err != nil {
aws: use new LB controller to fix SecurityGroup cleanup on K8s service deletion (#2090) * add current chart add current helm chart * disable service controller for aws ccm * add new iam roles * doc AWS internet LB + add to LB test * pass clusterName to helm for AWS LB * fix update-aws-lb chart to also include .helmignore * move chart outside services * working state * add subnet tags for AWS subnet discovery * fix .helmignore load rule with file in subdirectory * upgrade iam profile * revert new loader impl since cilium is not correctly loaded * install chart if not already present during `upgrade apply` * cleanup PR + fix build + add todos cleanup PR + add todos * shared helm pkg for cli install and bootstrapper * add link to eks docs * refactor iamMigrationCmd * delete unused helm.symwallk * move iammigrate to upgrade pkg * fixup! delete unused helm.symwallk * add to upgradecheck * remove nodeSelector from go code (Otto) * update iam docs and sort permission + remove duplicate roles * fix bug in `upgrade check` * better upgrade check output when svc version upgrade not possible * pr feedback * remove force flag in upgrade_test * use upgrader.GetUpgradeID instead of extra type * remove todos + fix check * update doc lb (leo) * remove bootstrapper helm package * Update cli/internal/cmd/upgradecheck.go Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com> * final nits * add docs for e2e upgrade test setup * Apply suggestions from code review Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com> * Update cli/internal/helm/loader.go Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com> * Update cli/internal/cmd/tfmigrationclient.go Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com> * fix daniel review * link to the iam permissions instead of manually updating them (agreed with leo) * disable iam upgrade in upgrade apply --------- Co-authored-by: Daniel Weiße 
<66256922+daniel-weisse@users.noreply.github.com> Co-authored-by: Malte Poll
2023-07-24 04:30:53 -04:00
return fmt.Errorf("setting up Kubernetes upgrader: %w", err)
}
helmClient, err := helm.NewReleaseVersionClient(kubeConfig, log)
if err != nil {
return fmt.Errorf("setting up helm client: %w", err)
}
versionfetcher := versionsapi.NewFetcher()
rekor, err := sigstore.NewRekor()
if err != nil {
return fmt.Errorf("constructing Rekor client: %w", err)
}
up := &upgradeCheckCmd{
canUpgradeCheck: featureset.CanUpgradeCheck,
collect: &versionCollector{
writer: cmd.OutOrStderr(),
kubeChecker: kubeChecker,
verListFetcher: versionfetcher,
fileHandler: fileHandler,
client: http.DefaultClient,
rekor: rekor,
flags: flags,
cliVersion: constants.BinaryVersion(),
helmClient: helmClient,
log: log,
versionsapi: versionfetcher,
},
cli: perform upgrades in-place in Terraform workspace (#2317) * perform upgrades in-place in terraform workspace Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * update buildfiles Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * add iam upgrade apply test Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * update buildfiles Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * fix linter Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * make config fetcher stubbable Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * change workspace restoring behaviour Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * allow overwriting existing Terraform files Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * allow overwrites of TF variables Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * fix iam upgrade apply Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * fix embed directive Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * make loader test less brittle Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * pass upgrade ID to user Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * naming nit Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * use upgradeDir Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * tidy Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> --------- Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>
2023-09-14 05:51:20 -04:00
upgradeDir: upgradeDir,
terraformChecker: tfClient,
fileHandler: fileHandler,
flags: flags,
log: log,
}
return up.upgradeCheck(cmd, attestationconfigapi.NewFetcher())
}
type upgradeCheckCmd struct {
canUpgradeCheck bool
cli: perform upgrades in-place in Terraform workspace (#2317) * perform upgrades in-place in terraform workspace Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * update buildfiles Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * add iam upgrade apply test Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * update buildfiles Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * fix linter Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * make config fetcher stubbable Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * change workspace restoring behaviour Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * allow overwriting existing Terraform files Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * allow overwrites of TF variables Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * fix iam upgrade apply Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * fix embed directive Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * make loader test less brittle Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * pass upgrade ID to user Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * naming nit Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * use upgradeDir Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * tidy Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> --------- Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>
2023-09-14 05:51:20 -04:00
upgradeDir string
collect collector
terraformChecker terraformChecker
fileHandler file.Handler
flags upgradeCheckFlags
log debugLog
}
// upgradePlan plans an upgrade of a Constellation cluster.
func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationconfigapi.Fetcher) error {
conf, err := config.New(u.fileHandler, constants.ConfigFilename, fetcher, u.flags.force)
var configValidationErr *config.ValidationError
if errors.As(err, &configValidationErr) {
cmd.PrintErrln(configValidationErr.LongMessage())
}
if err != nil {
return err
}
if !u.canUpgradeCheck {
cmd.PrintErrln("Planning Constellation upgrades automatically is not supported in the OSS build of the Constellation CLI. Consult the documentation for instructions on where to download the enterprise version.")
return errors.New("upgrade check is not supported")
}
// get current image version of the cluster
csp := conf.GetProvider()
2023-05-22 08:59:28 -04:00
attestationVariant := conf.GetAttestationConfig().GetVariant()
u.log.Debugf("Using provider %s with attestation variant %s", csp.String(), attestationVariant.String())
current, err := u.collect.currentVersions(cmd.Context())
if err != nil {
return err
}
supported, err := u.collect.supportedVersions(cmd.Context(), current.image, current.k8s)
if err != nil {
return err
}
u.log.Debugf("Current cli version: %s", current.cli)
u.log.Debugf("Supported cli version(s): %s", supported.cli)
u.log.Debugf("Current service version: %s", current.service)
u.log.Debugf("Supported service version: %s", supported.service)
u.log.Debugf("Current k8s version: %s", current.k8s)
u.log.Debugf("Supported k8s version(s): %s", supported.k8s)
// Filter versions to only include upgrades
newServices := supported.service
if err := supported.service.IsUpgradeTo(current.service); err != nil {
newServices = consemver.Semver{}
u.log.Debugf("No valid service upgrades are available from %q to %q. The minor version can only drift by 1.\n", current.service.String(), supported.service.String())
}
newKubernetes := filterK8sUpgrades(current.k8s, supported.k8s)
semver.Sort(newKubernetes)
supported.image = filterImageUpgrades(current.image, supported.image)
2023-05-22 08:59:28 -04:00
newImages, err := u.collect.newMeasurements(cmd.Context(), csp, attestationVariant, supported.image)
if err != nil {
return err
}
u.log.Debugf("Planning Terraform migrations")
// Add manual migrations here if required
//
// var manualMigrations []terraform.StateMigration
// for _, migration := range manualMigrations {
// u.log.Debugf("Adding manual Terraform migration: %s", migration.DisplayName)
// u.terraformChecker.AddManualStateMigration(migration)
// }
cmd.Println("The following Terraform migrations are available with this CLI:")
hasDiff, err := u.terraformChecker.Plan(cmd.Context(), conf)
if err != nil {
return fmt.Errorf("planning terraform migrations: %w", err)
}
defer func() {
cli: perform upgrades in-place in Terraform workspace (#2317) * perform upgrades in-place in terraform workspace Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * update buildfiles Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * add iam upgrade apply test Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * update buildfiles Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * fix linter Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * make config fetcher stubbable Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * change workspace restoring behaviour Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * allow overwriting existing Terraform files Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * allow overwrites of TF variables Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * fix iam upgrade apply Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * fix embed directive Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * make loader test less brittle Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * pass upgrade ID to user Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * naming nit Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * use upgradeDir Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * tidy Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> --------- Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>
2023-09-14 05:51:20 -04:00
// User doesn't expect to see any changes in his workspace after an "upgrade plan",
// therefore, roll back to the backed up state.
if err := u.terraformChecker.RestoreWorkspace(); err != nil {
cli: perform upgrades in-place in Terraform workspace (#2317) * perform upgrades in-place in terraform workspace Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * update buildfiles Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * add iam upgrade apply test Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * update buildfiles Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * fix linter Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * make config fetcher stubbable Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * change workspace restoring behaviour Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * allow overwriting existing Terraform files Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * allow overwrites of TF variables Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * fix iam upgrade apply Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * fix embed directive Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * make loader test less brittle Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * pass upgrade ID to user Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * naming nit Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * use upgradeDir Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * tidy Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> --------- Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>
2023-09-14 05:51:20 -04:00
cmd.PrintErrf(
"restoring Terraform workspace: %s, restore the Terraform workspace manually from %s ",
err,
filepath.Join(u.upgradeDir, constants.TerraformUpgradeBackupDir),
)
}
}()
if !hasDiff {
cmd.Println(" No Terraform migrations are available.")
}
upgrade := versionUpgrade{
newServices: newServices,
newImages: newImages,
newKubernetes: newKubernetes,
newCLI: supported.cli,
newCompatibleCLI: supported.compatibleCLI,
currentServices: current.service,
currentImage: current.image,
currentKubernetes: current.k8s,
currentCLI: current.cli,
}
updateMsg, err := upgrade.buildString()
if err != nil {
return err
}
// Using Print over Println as buildString already includes a trailing newline where necessary.
cmd.Print(updateMsg)
if u.flags.updateConfig {
if err := upgrade.writeConfig(conf, u.fileHandler, constants.ConfigFilename); err != nil {
return fmt.Errorf("writing config: %w", err)
}
cmd.Println("Config updated successfully.")
}
return nil
}
// sortedMapKeys returns the keys of the given map in ascending string order.
func sortedMapKeys[T any](a map[string]T) []string {
	keys := make([]string, 0, len(a))
	for key := range a {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	return keys
}
// filterImageUpgrades filters out image versions that are not valid upgrades.
func filterImageUpgrades(currentVersion consemver.Semver, newVersions []versionsapi.Version) []versionsapi.Version {
	valid := []versionsapi.Version{}
	for _, candidate := range newVersions {
		// Drop candidates that are not a valid upgrade from the current version.
		if compatibility.IsValidUpgrade(currentVersion.String(), candidate.Version()) != nil {
			continue
		}
		valid = append(valid, candidate)
	}
	return valid
}
// filterK8sUpgrades filters out K8s versions that are not valid upgrades.
func filterK8sUpgrades(currentVersion consemver.Semver, newVersions []string) []string {
	upgrades := []string{}
	for _, candidate := range newVersions {
		// Drop candidates that are not a valid upgrade from the current version.
		if compatibility.IsValidUpgrade(currentVersion.String(), candidate) != nil {
			continue
		}
		upgrades = append(upgrades, candidate)
	}
	return upgrades
}
type collector interface {
currentVersions(ctx context.Context) (currentVersionInfo, error)
supportedVersions(ctx context.Context, currentImageVersion, currentK8sVersion consemver.Semver) (supportedVersionInfo, error)
newImages(ctx context.Context, currentImageVersion consemver.Semver) ([]versionsapi.Version, error)
2023-05-22 08:59:28 -04:00
newMeasurements(ctx context.Context, csp cloudprovider.Provider, attestationVariant variant.Variant, images []versionsapi.Version) (map[string]measurements.M, error)
newerVersions(ctx context.Context, allowedVersions []string) ([]versionsapi.Version, error)
newCLIVersions(ctx context.Context) ([]consemver.Semver, error)
filterCompatibleCLIVersions(ctx context.Context, cliPatchVersions []consemver.Semver, currentK8sVersion consemver.Semver) ([]consemver.Semver, error)
}
// versionCollector implements collector: it reads current versions from the
// running cluster (via Helm and the Kubernetes API) and fetches supported
// versions from the versions API.
type versionCollector struct {
	writer         io.Writer          // destination for user-facing warnings and notices
	kubeChecker    kubernetesChecker  // reads the Constellation NodeVersion from the cluster
	verListFetcher versionListFetcher // fetches image version lists
	fileHandler    file.Handler
	client         *http.Client // used to download measurements
	rekor          rekorVerifier
	flags          upgradeCheckFlags
	versionsapi    versionFetcher // fetches CLI version lists and CLI release info
	cliVersion     consemver.Semver
	helmClient     *helm.ReleaseVersionClient
	log            debugLog
}
// newMeasurements fetches the signed measurements for each given image version
// and returns them keyed by the image's short path. Images whose measurements
// cannot be fetched or verified are skipped with a notice on the writer
// instead of failing the whole check.
//
// Fix: renamed the local result variable, which previously shadowed the
// imported measurements package.
func (v *versionCollector) newMeasurements(ctx context.Context, csp cloudprovider.Provider, attestationVariant variant.Variant, versions []versionsapi.Version) (map[string]measurements.M, error) {
	// get expected measurements for each image
	upgrades := make(map[string]measurements.M)
	for _, version := range versions {
		v.log.Debugf("Fetching measurements for image: %s", version)
		shortPath := version.ShortPath()

		publicKey, err := keyselect.CosignPublicKeyForVersion(version)
		if err != nil {
			return nil, fmt.Errorf("getting public key: %w", err)
		}
		cosign, err := sigstore.NewCosignVerifier(publicKey)
		if err != nil {
			return nil, fmt.Errorf("setting public key: %w", err)
		}

		fetched, err := getCompatibleImageMeasurements(ctx, v.writer, v.client, cosign, v.rekor, csp, attestationVariant, version, v.log)
		if err != nil {
			// Best effort: tell the user and move on to the next image.
			if _, err := fmt.Fprintf(v.writer, "Skipping compatible image %q: %s\n", shortPath, err); err != nil {
				return nil, fmt.Errorf("writing to buffer: %w", err)
			}
			continue
		}
		upgrades[shortPath] = fetched
	}
	v.log.Debugf("Compatible image measurements are %v", upgrades)

	return upgrades, nil
}
// currentVersionInfo bundles the versions currently in use by the cluster
// and this CLI.
type currentVersionInfo struct {
	service consemver.Semver // Constellation services (Helm chart) version
	image   consemver.Semver // node OS image version
	k8s     consemver.Semver // Kubernetes version
	cli     consemver.Semver // version of this CLI binary
}
// currentVersions queries the running cluster for the versions of its
// components: the Constellation services via Helm, the image and Kubernetes
// versions via the cluster's NodeVersion resource, plus this CLI's version.
func (v *versionCollector) currentVersions(ctx context.Context) (currentVersionInfo, error) {
	helmVersions, err := v.helmClient.Versions()
	if err != nil {
		return currentVersionInfo{}, fmt.Errorf("getting service versions: %w", err)
	}

	nodeVersion, err := v.kubeChecker.GetConstellationVersion(ctx)
	if err != nil {
		return currentVersionInfo{}, fmt.Errorf("getting cluster versions: %w", err)
	}
	image, err := consemver.New(nodeVersion.ImageVersion())
	if err != nil {
		return currentVersionInfo{}, fmt.Errorf("parsing image semantic version: %w", err)
	}
	k8s, err := consemver.New(nodeVersion.KubernetesVersion())
	if err != nil {
		return currentVersionInfo{}, fmt.Errorf("parsing Kubernetes semantic version: %w", err)
	}

	info := currentVersionInfo{
		service: helmVersions.ConstellationServices(),
		image:   image,
		k8s:     k8s,
		cli:     v.cliVersion,
	}
	return info, nil
}
// supportedVersionInfo bundles the versions this CLI can upgrade a cluster to.
type supportedVersionInfo struct {
	service consemver.Semver      // service version shipped with this CLI
	image   []versionsapi.Version // candidate image versions
	k8s     []string              // Kubernetes versions supported by this CLI
	// CLI versions including those incompatible with the current Kubernetes version.
	cli []consemver.Semver
	// CLI versions compatible with the current Kubernetes version.
	compatibleCLI []consemver.Semver
}
// supportedVersions returns slices of supported versions: Kubernetes versions
// known to this CLI, candidate image versions, and newer CLI versions (both
// all of them and only those compatible with the current Kubernetes version).
func (v *versionCollector) supportedVersions(ctx context.Context, currentImageVersion, currentK8sVersion consemver.Semver) (supportedVersionInfo, error) {
	supportedK8s := versions.SupportedK8sVersions()

	supportedImages, err := v.newImages(ctx, currentImageVersion)
	if err != nil {
		return supportedVersionInfo{}, fmt.Errorf("loading image versions: %w", err)
	}
	supportedCLIs, err := v.newCLIVersions(ctx)
	if err != nil {
		return supportedVersionInfo{}, fmt.Errorf("loading cli versions: %w", err)
	}
	compatibleCLIs, err := v.filterCompatibleCLIVersions(ctx, supportedCLIs, currentK8sVersion)
	if err != nil {
		return supportedVersionInfo{}, fmt.Errorf("filtering cli versions: %w", err)
	}

	return supportedVersionInfo{
		// Each CLI comes with a set of services that have the same version as the CLI.
		service:       constants.BinaryVersion(),
		image:         supportedImages,
		k8s:           supportedK8s,
		cli:           supportedCLIs,
		compatibleCLI: compatibleCLIs,
	}, nil
}
// newImages returns image versions that are candidate upgrade targets for the
// given current image version, constrained by the minor version of this CLI.
func (v *versionCollector) newImages(ctx context.Context, currentImageVersion consemver.Semver) ([]versionsapi.Version, error) {
	// find compatible images
	// image updates should always be possible for the current minor version of the cluster
	// (e.g. 0.1.0 -> 0.1.1, 0.1.2, 0.1.3, etc.)
	// additionally, we allow updates to the next minor version (e.g. 0.1.0 -> 0.2.0)
	// if the CLI minor version is newer than the cluster minor version
	currentImageMinorVer := semver.MajorMinor(currentImageVersion.String())
	currentCLIMinorVer := semver.MajorMinor(v.cliVersion.String())
	nextImageMinorVer, err := compatibility.NextMinorVersion(currentImageMinorVer)
	if err != nil {
		return nil, fmt.Errorf("calculating next image minor version: %w", err)
	}
	v.log.Debugf("Current image minor version is %s", currentImageMinorVer)
	v.log.Debugf("Current CLI minor version is %s", currentCLIMinorVer)
	v.log.Debugf("Next image minor version is %s", nextImageMinorVer)

	allowedMinorVersions := []string{currentImageMinorVer, nextImageMinorVer}
	switch cliImageCompare := semver.Compare(currentCLIMinorVer, currentImageMinorVer); {
	case cliImageCompare < 0:
		// CLI minor version is older than the cluster image: refuse unless forced.
		if !v.flags.force {
			return nil, fmt.Errorf("cluster image version (%s) newer than CLI version (%s)", currentImageMinorVer, currentCLIMinorVer)
		}
		if _, err := fmt.Fprintln(v.writer, "WARNING: CLI version is older than cluster image version. Continuing due to force flag."); err != nil {
			return nil, fmt.Errorf("writing to buffer: %w", err)
		}
	case cliImageCompare == 0:
		// Same minor version: only offer patch upgrades within it.
		allowedMinorVersions = []string{currentImageMinorVer}
	case cliImageCompare > 0:
		// CLI is newer: also offer the next minor version (same as the default above).
		allowedMinorVersions = []string{currentImageMinorVer, nextImageMinorVer}
	}
	v.log.Debugf("Allowed minor versions are %#v", allowedMinorVersions)

	newerImages, err := v.newerVersions(ctx, allowedMinorVersions)
	if err != nil {
		return nil, fmt.Errorf("newer versions: %w", err)
	}

	return newerImages, nil
}
// newerVersions fetches, for every allowed minor version, the image patch
// versions published on the configured ref and stream. Minor versions without
// a published version list are skipped.
func (v *versionCollector) newerVersions(ctx context.Context, allowedVersions []string) ([]versionsapi.Version, error) {
	var candidates []versionsapi.Version
	for _, minor := range allowedVersions {
		query := versionsapi.List{
			Ref:         v.flags.ref,
			Stream:      v.flags.stream,
			Base:        minor,
			Granularity: versionsapi.GranularityMinor,
			Kind:        versionsapi.VersionKindImage,
		}
		fetched, err := v.verListFetcher.FetchVersionList(ctx, query)
		var notFound *fetcher.NotFoundError
		if errors.As(err, &notFound) {
			// No list published for this minor version; not an error.
			v.log.Debugf("Skipping version: %s", err)
			continue
		}
		if err != nil {
			return nil, fmt.Errorf("fetching version list: %w", err)
		}
		candidates = append(candidates, fetched.StructuredVersions()...)
	}
	v.log.Debugf("Update candidates are %v", candidates)
	return candidates, nil
}
// versionUpgrade holds the outcome of an upgrade check: the available target
// versions next to the versions currently deployed.
type versionUpgrade struct {
	newServices       consemver.Semver          // zero value means no service upgrade is available
	newImages         map[string]measurements.M // image short path -> expected measurements
	newKubernetes     []string
	newCLI            []consemver.Semver // includes CLIs incompatible with the current k8s version
	newCompatibleCLI  []consemver.Semver // only CLIs compatible with the current k8s version
	currentServices   consemver.Semver
	currentImage      consemver.Semver
	currentKubernetes consemver.Semver
	currentCLI        consemver.Semver
}
// buildString renders the user-facing upgrade report: available Kubernetes,
// image (with measurements), and service upgrades; or, when none exist, a
// hint about newer CLI versions, or "You are up to date.".
//
// Fix: the measurement-encoding error was previously checked only AFTER the
// encoder output had already been used; the error is now checked first.
func (v *versionUpgrade) buildString() (string, error) {
	upgradeMsg := strings.Builder{}

	if len(v.newKubernetes) > 0 {
		upgradeMsg.WriteString(fmt.Sprintf(" Kubernetes: %s --> %s\n", v.currentKubernetes, strings.Join(v.newKubernetes, " ")))
	}

	if len(v.newImages) > 0 {
		imageMsgs := strings.Builder{}
		newImagesSorted := sortedMapKeys(v.newImages)
		for i, image := range newImagesSorted {
			// prevent trailing newlines
			if i > 0 {
				imageMsgs.WriteString("\n")
			}
			content, err := encoder.NewEncoder(v.newImages[image]).Encode()
			if err != nil {
				return "", fmt.Errorf("marshalling measurements: %w", err)
			}
			contentFormatted := strings.ReplaceAll(string(content), "\n", "\n ")
			imageMsgs.WriteString(fmt.Sprintf(" %s --> %s\n Includes these measurements:\n %s", v.currentImage, image, contentFormatted))
		}
		upgradeMsg.WriteString(" Images:\n")
		upgradeMsg.WriteString(imageMsgs.String())
		fmt.Fprintln(&upgradeMsg, "")
	}

	if v.newServices != (consemver.Semver{}) {
		upgradeMsg.WriteString(fmt.Sprintf(" Services: %s --> %s\n", v.currentServices, v.newServices))
	}

	result := strings.Builder{}
	if upgradeMsg.Len() > 0 {
		result.WriteString("The following updates are available with this CLI:\n")
		result.WriteString(upgradeMsg.String())
		return result.String(), nil
	}

	// no upgrades available
	if v.newServices == (consemver.Semver{}) && len(v.newImages) == 0 {
		if len(v.newCompatibleCLI) > 0 {
			result.WriteString(fmt.Sprintf("Newer CLI versions that are compatible with your cluster are: %s\n", strings.Join(consemver.ToStrings(v.newCompatibleCLI), " ")))
			return result.String(), nil
		} else if len(v.newCLI) > 0 {
			result.WriteString(fmt.Sprintf("There are newer CLIs available (%s), however, you need to upgrade your cluster's Kubernetes version first.\n", strings.Join(consemver.ToStrings(v.newCLI), " ")))
			return result.String(), nil
		}
	}

	result.WriteString("You are up to date.\n")
	return result.String(), nil
}
// writeConfig applies the suggested upgrades to the given config and writes it
// back to configPath, overwriting the existing file.
func (v *versionUpgrade) writeConfig(conf *config.Config, fileHandler file.Handler, configPath string) error {
	// can't sort image map because maps are unsorted. services is only one string, k8s versions are sorted.
	if v.newServices != (consemver.Semver{}) {
		conf.MicroserviceVersion = v.newServices
	}
	if len(v.newKubernetes) > 0 {
		// Take the first entry; upgradeCheck sorts newKubernetes before calling this.
		var err error
		conf.KubernetesVersion, err = versions.NewValidK8sVersion(v.newKubernetes[0], true)
		if err != nil {
			return fmt.Errorf("parsing Kubernetes version: %w", err)
		}
	}
	if len(v.newImages) > 0 {
		// Take the first image in sorted key order together with its measurements.
		imageUpgrade := sortedMapKeys(v.newImages)[0]
		conf.Image = imageUpgrade
		conf.UpdateMeasurements(v.newImages[imageUpgrade])
	}

	return fileHandler.WriteYAML(configPath, conf, file.OptOverwrite)
}
// getCompatibleImageMeasurements retrieves the expected measurements for each image.
// It downloads the measurements and their signature for the given version,
// verifies them with cosign, and additionally checks the signed hash against
// Rekor. A failed Rekor check only prints a warning; the measurements are
// still returned.
func getCompatibleImageMeasurements(ctx context.Context, writer io.Writer, client *http.Client, cosign sigstore.Verifier, rekor rekorVerifier,
	csp cloudprovider.Provider, attestationVariant variant.Variant, version versionsapi.Version, log debugLog,
) (measurements.M, error) {
	measurementsURL, signatureURL, err := versionsapi.MeasurementURL(version)
	if err != nil {
		return nil, err
	}

	var fetchedMeasurements measurements.M
	log.Debugf("Fetching for measurement url: %s", measurementsURL)

	hash, err := fetchedMeasurements.FetchAndVerify(
		ctx, client, cosign,
		measurementsURL,
		signatureURL,
		version,
		csp,
		attestationVariant,
	)
	if err != nil {
		return nil, fmt.Errorf("fetching measurements: %w", err)
	}

	pubkey, err := keyselect.CosignPublicKeyForVersion(version)
	if err != nil {
		return nil, fmt.Errorf("getting public key: %w", err)
	}

	// Rekor verification is best-effort: warn the user but do not fail.
	if err = sigstore.VerifyWithRekor(ctx, pubkey, rekor, hash); err != nil {
		if _, err := fmt.Fprintf(writer, "Warning: Unable to verify '%s' in Rekor.\nMake sure measurements are correct.\n", hash); err != nil {
			return nil, fmt.Errorf("writing to buffer: %w", err)
		}
	}

	return fetchedMeasurements, nil
}
// versionFetcher fetches version lists and CLI release info from the versions API.
type versionFetcher interface {
	FetchVersionList(ctx context.Context, list versionsapi.List) (versionsapi.List, error)
	FetchCLIInfo(ctx context.Context, cliInfo versionsapi.CLIInfo) (versionsapi.CLIInfo, error)
}
// newCLIVersions returns a list of versions of the CLI which are a valid upgrade.
func (v *versionCollector) newCLIVersions(ctx context.Context) ([]consemver.Semver, error) {
	// List the minor versions published under this CLI's major version.
	list := versionsapi.List{
		Ref:         v.flags.ref,
		Stream:      v.flags.stream,
		Granularity: versionsapi.GranularityMajor,
		Base:        fmt.Sprintf("v%d", constants.BinaryVersion().Major()),
		Kind:        versionsapi.VersionKindCLI,
	}
	minorList, err := v.versionsapi.FetchVersionList(ctx, list)
	if err != nil {
		return nil, fmt.Errorf("listing major versions: %w", err)
	}

	var patchVersions []string
	for _, version := range minorList.Versions {
		target, err := consemver.New(version)
		if err != nil {
			return nil, fmt.Errorf("parsing version %s: %w", version, err)
		}
		// Skip minor versions that are not a valid upgrade from the running CLI.
		if err := target.IsUpgradeTo(v.cliVersion); err != nil {
			v.log.Debugf("Skipping incompatible minor version %q: %s", version, err)
			continue
		}
		// NOTE: intentionally shadows the outer "list"; this inner query fetches
		// the patch versions of the accepted minor version.
		list := versionsapi.List{
			Ref:         v.flags.ref,
			Stream:      v.flags.stream,
			Granularity: versionsapi.GranularityMinor,
			Base:        version,
			Kind:        versionsapi.VersionKindCLI,
		}
		patchList, err := v.versionsapi.FetchVersionList(ctx, list)
		if err != nil {
			return nil, fmt.Errorf("listing minor versions for major version %s: %w", version, err)
		}
		patchVersions = append(patchVersions, patchList.Versions...)
	}

	out, err := consemver.NewSlice(patchVersions)
	if err != nil {
		return nil, fmt.Errorf("parsing versions: %w", err)
	}
	// Sort for deterministic output.
	consemver.Sort(out)
	return out, nil
}
// filterCompatibleCLIVersions filters a list of CLI versions which are compatible with the current Kubernetes version.
func (v *versionCollector) filterCompatibleCLIVersions(ctx context.Context, cliPatchVersions []consemver.Semver, currentK8sVersion consemver.Semver,
) ([]consemver.Semver, error) {
	// filter out invalid upgrades and versions which are not compatible with the current Kubernetes version
	var compatibleVersions []consemver.Semver
	for _, version := range cliPatchVersions {
		if err := version.IsUpgradeTo(v.cliVersion); err != nil {
			v.log.Debugf("Skipping incompatible patch version %q: %s", version, err)
			continue
		}
		// Ask the versions API which Kubernetes versions this CLI release supports.
		req := versionsapi.CLIInfo{
			Ref:     v.flags.ref,
			Stream:  v.flags.stream,
			Version: version.String(),
		}
		info, err := v.versionsapi.FetchCLIInfo(ctx, req)
		if err != nil {
			return nil, fmt.Errorf("fetching CLI info: %w", err)
		}

		// A CLI release is compatible when one of its supported Kubernetes
		// versions equals the cluster's current Kubernetes version.
		// NOTE(review): duplicate entries in info.Kubernetes would append the
		// same CLI version more than once — presumably the API returns unique
		// versions; verify.
		for _, k8sVersion := range info.Kubernetes {
			k8sVersionSem, err := consemver.New(k8sVersion)
			if err != nil {
				return nil, fmt.Errorf("parsing Kubernetes version %s: %w", k8sVersion, err)
			}
			if k8sVersionSem.Compare(currentK8sVersion) == 0 {
				compatibleVersions = append(compatibleVersions, version)
			}
		}
	}
	// Sort for deterministic output.
	consemver.Sort(compatibleVersions)

	return compatibleVersions, nil
}
// kubernetesChecker reads the Constellation NodeVersion from the running cluster.
type kubernetesChecker interface {
	GetConstellationVersion(ctx context.Context) (kubecmd.NodeVersion, error)
}
// terraformChecker plans Terraform migrations (returning whether a diff
// exists) and can restore the workspace to its pre-plan state.
type terraformChecker interface {
	Plan(context.Context, *config.Config) (bool, error)
	RestoreWorkspace() error
}
// versionListFetcher fetches a list of versions from the versions API.
type versionListFetcher interface {
	FetchVersionList(ctx context.Context, list versionsapi.List) (versionsapi.List, error)
}