cli: upgrade uses same helm releases as init (#2177)
This commit is contained in: parent 2049713620, commit 4788467bca
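In effect, upgrade apply now converges on the same two shared helpers that init already uses. A rough sketch of the common call shape (simplified, not the verbatim call sites; conf, flags, masterSecret, idFile, and tfOutput stand in for values each command already holds):

    // Both commands marshal the CSP service account the same way:
    serviceAccURI, err := cloudcmd.GetMarshaledServiceAccountURI(conf.GetProvider(), conf, flags.pf, log, fileHandler)
    if err != nil {
        return err
    }
    // ...and build the identical set of Helm releases from it:
    releases, err := helmLoader.LoadReleases(conf, flags.conformance, flags.helmWaitMode, masterSecret, serviceAccURI, idFile, tfOutput)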
@@ -10,6 +10,7 @@ go_library(
         "iam.go",
         "patch.go",
         "rollback.go",
+        "serviceaccount.go",
         "terminate.go",
         "terraform.go",
         "validators.go",
@@ -18,15 +19,19 @@ go_library(
     visibility = ["//cli:__subpackages__"],
     deps = [
         "//cli/internal/clusterid",
+        "//cli/internal/cmd/pathprefix",
         "//cli/internal/libvirt",
         "//cli/internal/terraform",
         "//internal/atls",
         "//internal/attestation/choose",
         "//internal/attestation/measurements",
         "//internal/attestation/variant",
+        "//internal/cloud/azureshared",
         "//internal/cloud/cloudprovider",
+        "//internal/cloud/gcpshared",
+        "//internal/cloud/openstack",
         "//internal/config",
         "//internal/file",
         "//internal/imagefetcher",
         "//internal/role",
         "@com_github_azure_azure_sdk_for_go//profiles/latest/attestation/attestation",
cli/internal/cloudcmd/serviceaccount.go (new file, 73 lines)
@@ -0,0 +1,73 @@
+/*
+Copyright (c) Edgeless Systems GmbH
+
+SPDX-License-Identifier: AGPL-3.0-only
+*/
+
+package cloudcmd
+
+import (
+    "fmt"
+
+    "github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
+    "github.com/edgelesssys/constellation/v2/internal/cloud/azureshared"
+    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
+    "github.com/edgelesssys/constellation/v2/internal/cloud/gcpshared"
+    "github.com/edgelesssys/constellation/v2/internal/cloud/openstack"
+    "github.com/edgelesssys/constellation/v2/internal/config"
+    "github.com/edgelesssys/constellation/v2/internal/file"
+)
+
+// GetMarshaledServiceAccountURI returns the service account URI for the given cloud provider.
+func GetMarshaledServiceAccountURI(provider cloudprovider.Provider, config *config.Config, pf pathprefix.PathPrefixer, log debugLog, fileHandler file.Handler,
+) (string, error) {
+    log.Debugf("Getting service account URI")
+    switch provider {
+    case cloudprovider.GCP:
+        log.Debugf("Handling case for GCP")
+        log.Debugf("GCP service account key path %s", pf.PrefixPath(config.Provider.GCP.ServiceAccountKeyPath))
+
+        var key gcpshared.ServiceAccountKey
+        if err := fileHandler.ReadJSON(config.Provider.GCP.ServiceAccountKeyPath, &key); err != nil {
+            return "", fmt.Errorf("reading service account key from path %q: %w", pf.PrefixPath(config.Provider.GCP.ServiceAccountKeyPath), err)
+        }
+        log.Debugf("Read GCP service account key from path")
+        return key.ToCloudServiceAccountURI(), nil
+
+    case cloudprovider.AWS:
+        log.Debugf("Handling case for AWS")
+        return "", nil // AWS does not need a service account URI
+    case cloudprovider.Azure:
+        log.Debugf("Handling case for Azure")
+
+        authMethod := azureshared.AuthMethodUserAssignedIdentity
+
+        creds := azureshared.ApplicationCredentials{
+            TenantID:            config.Provider.Azure.TenantID,
+            Location:            config.Provider.Azure.Location,
+            PreferredAuthMethod: authMethod,
+            UamiResourceID:      config.Provider.Azure.UserAssignedIdentity,
+        }
+        return creds.ToCloudServiceAccountURI(), nil
+
+    case cloudprovider.OpenStack:
+        creds := openstack.AccountKey{
+            AuthURL:           config.Provider.OpenStack.AuthURL,
+            Username:          config.Provider.OpenStack.Username,
+            Password:          config.Provider.OpenStack.Password,
+            ProjectID:         config.Provider.OpenStack.ProjectID,
+            ProjectName:       config.Provider.OpenStack.ProjectName,
+            UserDomainName:    config.Provider.OpenStack.UserDomainName,
+            ProjectDomainName: config.Provider.OpenStack.ProjectDomainName,
+            RegionName:        config.Provider.OpenStack.RegionName,
+        }
+        return creds.ToCloudServiceAccountURI(), nil
+
+    case cloudprovider.QEMU:
+        log.Debugf("Handling case for QEMU")
+        return "", nil // QEMU does not use service account keys
+
+    default:
+        return "", fmt.Errorf("unsupported cloud provider %q", provider)
+    }
+}
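For callers, the helper only needs the parsed config plus the CLI's path prefixer, logger, and file handler; per the switch above, AWS and QEMU yield an empty URI. A minimal sketch of the call as init.go uses it further down in this diff:

    serviceAccURI, err := cloudcmd.GetMarshaledServiceAccountURI(provider, conf, i.pf, i.log, i.fileHandler)
    if err != nil {
        return err
    }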
@@ -35,6 +35,8 @@ type File struct {
     // AttestationURL is the URL of the attestation service.
     // It is only set if the cluster is created on Azure.
     AttestationURL string `json:"attestationURL,omitempty"`
+    // MeasurementSalt is the salt generated during cluster init.
+    MeasurementSalt []byte `json:"measurementSalt,omitempty"`
 }

 // Merge merges the other file into the current file and returns the result.
@@ -56,10 +56,8 @@ go_library(
         "//internal/atls",
         "//internal/attestation/measurements",
         "//internal/attestation/variant",
-        "//internal/cloud/azureshared",
         "//internal/cloud/cloudprovider",
         "//internal/cloud/gcpshared",
-        "//internal/cloud/openstack",
         "//internal/compatibility",
         "//internal/config",
         "//internal/config/instancetypes",
@@ -40,10 +40,7 @@ import (
     "github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
     "github.com/edgelesssys/constellation/v2/cli/internal/helm"
     "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
-    "github.com/edgelesssys/constellation/v2/internal/cloud/azureshared"
     "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
-    "github.com/edgelesssys/constellation/v2/internal/cloud/gcpshared"
-    "github.com/edgelesssys/constellation/v2/internal/cloud/openstack"
     "github.com/edgelesssys/constellation/v2/internal/config"
     "github.com/edgelesssys/constellation/v2/internal/constants"
    "github.com/edgelesssys/constellation/v2/internal/crypto"
@@ -195,7 +192,7 @@ func (i *initCmd) initialize(cmd *cobra.Command, newDialer func(validator atls.V
         return fmt.Errorf("creating new validator: %w", err)
     }
     i.log.Debugf("Created a new validator")
-    serviceAccURI, err := i.getMarshaledServiceAccountURI(provider, conf)
+    serviceAccURI, err := cloudcmd.GetMarshaledServiceAccountURI(provider, conf, i.pf, i.log, i.fileHandler)
     if err != nil {
         return err
     }
@@ -211,7 +208,7 @@ func (i *initCmd) initialize(cmd *cobra.Command, newDialer func(validator atls.V
     if err != nil {
         return fmt.Errorf("generating measurement salt: %w", err)
     }
-    i.log.Debugf("Measurement salt: %x", measurementSalt)
+    idFile.MeasurementSalt = measurementSalt

     clusterName := clusterid.GetClusterName(conf, idFile)
     i.log.Debugf("Setting cluster name to %s", clusterName)
@@ -258,7 +255,7 @@ func (i *initCmd) initialize(cmd *cobra.Command, newDialer func(validator atls.V
     if err != nil {
         return fmt.Errorf("getting Terraform output: %w", err)
     }
-    releases, err := helmLoader.LoadReleases(conf, flags.conformance, flags.helmWaitMode, masterSecret, measurementSalt, serviceAccURI, idFile, output)
+    releases, err := helmLoader.LoadReleases(conf, flags.conformance, flags.helmWaitMode, masterSecret, serviceAccURI, idFile, output)
     if err != nil {
         return fmt.Errorf("loading Helm charts: %w", err)
     }
@@ -510,7 +507,7 @@ type initFlags struct {
     mergeConfigs bool
 }

-// readOrGenerateMasterSecret reads a base64 encoded master secret from file or generates a new 32 byte secret.
+// generateMasterSecret reads a base64 encoded master secret from file or generates a new 32 byte secret.
 func (i *initCmd) generateMasterSecret(outWriter io.Writer) (uri.MasterSecret, error) {
     // No file given, generate a new secret, and save it to disk
     i.log.Debugf("Generating new master secret")
@@ -534,59 +531,6 @@ func (i *initCmd) generateMasterSecret(outWriter io.Writer) (uri.MasterSecret, e
     return secret, nil
 }
-
-func (i *initCmd) getMarshaledServiceAccountURI(provider cloudprovider.Provider, config *config.Config,
-) (string, error) {
-    i.log.Debugf("Getting service account URI")
-    switch provider {
-    case cloudprovider.GCP:
-        i.log.Debugf("Handling case for GCP")
-        i.log.Debugf("GCP service account key path %s", i.pf.PrefixPath(config.Provider.GCP.ServiceAccountKeyPath))
-
-        var key gcpshared.ServiceAccountKey
-        if err := i.fileHandler.ReadJSON(config.Provider.GCP.ServiceAccountKeyPath, &key); err != nil {
-            return "", fmt.Errorf("reading service account key from path %q: %w", i.pf.PrefixPath(config.Provider.GCP.ServiceAccountKeyPath), err)
-        }
-        i.log.Debugf("Read GCP service account key from path")
-        return key.ToCloudServiceAccountURI(), nil
-
-    case cloudprovider.AWS:
-        i.log.Debugf("Handling case for AWS")
-        return "", nil // AWS does not need a service account URI
-    case cloudprovider.Azure:
-        i.log.Debugf("Handling case for Azure")
-
-        authMethod := azureshared.AuthMethodUserAssignedIdentity
-
-        creds := azureshared.ApplicationCredentials{
-            TenantID:            config.Provider.Azure.TenantID,
-            Location:            config.Provider.Azure.Location,
-            PreferredAuthMethod: authMethod,
-            UamiResourceID:      config.Provider.Azure.UserAssignedIdentity,
-        }
-        return creds.ToCloudServiceAccountURI(), nil
-
-    case cloudprovider.OpenStack:
-        creds := openstack.AccountKey{
-            AuthURL:           config.Provider.OpenStack.AuthURL,
-            Username:          config.Provider.OpenStack.Username,
-            Password:          config.Provider.OpenStack.Password,
-            ProjectID:         config.Provider.OpenStack.ProjectID,
-            ProjectName:       config.Provider.OpenStack.ProjectName,
-            UserDomainName:    config.Provider.OpenStack.UserDomainName,
-            ProjectDomainName: config.Provider.OpenStack.ProjectDomainName,
-            RegionName:        config.Provider.OpenStack.RegionName,
-        }
-        return creds.ToCloudServiceAccountURI(), nil
-
-    case cloudprovider.QEMU:
-        i.log.Debugf("Handling case for QEMU")
-        return "", nil // QEMU does not use service account keys
-
-    default:
-        return "", fmt.Errorf("unsupported cloud provider %q", provider)
-    }
-}

 type configMerger interface {
     mergeConfigs(configPath string, fileHandler file.Handler) error
     kubeconfigEnvVar() string
@@ -151,7 +151,7 @@ func status(
 }

 func getAttestationConfig(ctx context.Context, cmClient configMapClient, attestVariant variant.Variant) (config.AttestationCfg, error) {
-    joinConfig, err := cmClient.GetCurrentConfigMap(ctx, constants.JoinConfigMap)
+    joinConfig, err := cmClient.GetConfigMap(ctx, constants.JoinConfigMap)
     if err != nil {
         return nil, fmt.Errorf("getting current config map: %w", err)
     }
@@ -246,5 +246,5 @@ type kubeClient interface {
 }

 type configMapClient interface {
-    GetCurrentConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error)
+    GetConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error)
 }
@@ -209,7 +209,7 @@ func TestStatus(t *testing.T) {

 type stubConfigMapperAWSNitro struct{}

-func (s stubConfigMapperAWSNitro) GetCurrentConfigMap(_ context.Context, _ string) (*corev1.ConfigMap, error) {
+func (s stubConfigMapperAWSNitro) GetConfigMap(_ context.Context, _ string) (*corev1.ConfigMap, error) {
     return &corev1.ConfigMap{
         Data: map[string]string{
             "attestationConfig": `{"measurements":{"0":{"expected":"737f767a12f54e70eecbc8684011323ae2fe2dd9f90785577969d7a2013e8c12","warnOnly":true},"11":{"expected":"0000000000000000000000000000000000000000000000000000000000000000","warnOnly":false},"12":{"expected":"b8038d11eade4cfee5fd41da04bf64e58bab15c42bfe01801e4c0f61376ba010","warnOnly":false},"13":{"expected":"0000000000000000000000000000000000000000000000000000000000000000","warnOnly":false},"14":{"expected":"d7c4cc7ff7933022f013e03bdee875b91720b5b86cf1753cad830f95e791926f","warnOnly":true},"15":{"expected":"0000000000000000000000000000000000000000000000000000000000000000","warnOnly":false},"2":{"expected":"3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969","warnOnly":true},"3":{"expected":"3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969","warnOnly":true},"4":{"expected":"55f7616b2c51dd7603f491c1c266373fe5c1e25e06a851d2090960172b03b27f","warnOnly":false},"6":{"expected":"3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969","warnOnly":true},"7":{"expected":"fb71e5e55cefba9e2b396d17604de0fe6e1841a76758856a120833e3ad1c40a3","warnOnly":true},"8":{"expected":"0000000000000000000000000000000000000000000000000000000000000000","warnOnly":false},"9":{"expected":"f7480d37929bef4b61c32823cb7b3771aea19f7510db2e1478719a1d88f9775d","warnOnly":false}}}`,
@@ -21,6 +21,5 @@ func NewUpgradeCmd() *cobra.Command {

     cmd.AddCommand(newUpgradeCheckCmd())
     cmd.AddCommand(newUpgradeApplyCmd())
-
     return cmd
 }
@@ -28,12 +28,12 @@ import (
     "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/edgelesssys/constellation/v2/internal/file"
     "github.com/edgelesssys/constellation/v2/internal/imagefetcher"
+    "github.com/edgelesssys/constellation/v2/internal/kms/uri"
     "github.com/edgelesssys/constellation/v2/internal/versions"
     "github.com/rogpeppe/go-internal/diff"
     "github.com/spf13/afero"
     "github.com/spf13/cobra"
     "gopkg.in/yaml.v3"
-    corev1 "k8s.io/api/core/v1"
 )

 func newUpgradeApplyCmd() *cobra.Command {
@@ -50,6 +50,8 @@ func newUpgradeApplyCmd() *cobra.Command {
         "WARNING: might unintentionally overwrite measurements in the running cluster.")
     cmd.Flags().Duration("timeout", 5*time.Minute, "change helm upgrade timeout\n"+
         "Might be useful for slow connections or big clusters.")
+    cmd.Flags().Bool("conformance", false, "enable conformance mode")
+    cmd.Flags().Bool("skip-helm-wait", false, "install helm charts without waiting for deployments to be ready")
     if err := cmd.Flags().MarkHidden("timeout"); err != nil {
         panic(err)
     }
@@ -76,35 +78,31 @@ func runUpgradeApply(cmd *cobra.Command, _ []string) error {

     imagefetcher := imagefetcher.New()
     configFetcher := attestationconfigapi.NewFetcher()
-    applyCmd := upgradeApplyCmd{upgrader: upgrader, log: log, imageFetcher: imagefetcher, configFetcher: configFetcher}
-    return applyCmd.upgradeApply(cmd, fileHandler, stableClientFactoryImpl)
-}
-
-type stableClientFactory func(kubeconfigPath string) (getConfigMapper, error)
-
-// needed because StableClient returns the bigger kubernetes.StableInterface.
-func stableClientFactoryImpl(kubeconfigPath string) (getConfigMapper, error) {
-    return kubernetes.NewStableClient(kubeconfigPath)
-}
-
-type getConfigMapper interface {
-    GetCurrentConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error)
-}
+    tfClient, err := terraform.New(cmd.Context(), constants.TerraformWorkingDir)
+    if err != nil {
+        return fmt.Errorf("setting up terraform client: %w", err)
+    }
+    applyCmd := upgradeApplyCmd{upgrader: upgrader, log: log, imageFetcher: imagefetcher, configFetcher: configFetcher, clusterShower: tfClient, fileHandler: fileHandler}
+    return applyCmd.upgradeApply(cmd)
+}

 type upgradeApplyCmd struct {
     upgrader      cloudUpgrader
     imageFetcher  imageFetcher
     configFetcher attestationconfigapi.Fetcher
+    clusterShower clusterShower
+    fileHandler   file.Handler
     log           debugLog
 }

-func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, fileHandler file.Handler, stableClientFactory stableClientFactory) error {
+func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command) error {
     flags, err := parseUpgradeApplyFlags(cmd)
     if err != nil {
         return fmt.Errorf("parsing flags: %w", err)
     }

-    conf, err := config.New(fileHandler, constants.ConfigFilename, u.configFetcher, flags.force)
+    conf, err := config.New(u.fileHandler, constants.ConfigFilename, u.configFetcher, flags.force)
     var configValidationErr *config.ValidationError
     if errors.As(err, &configValidationErr) {
         cmd.PrintErrln(configValidationErr.LongMessage())
@@ -125,31 +123,39 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, fileHandler file.Hand
             }
         }
     }
-    if err := handleInvalidK8sPatchVersion(cmd, conf.KubernetesVersion, flags.yes); err != nil {
+    validK8sVersion, err := validK8sVersion(cmd, conf.KubernetesVersion, flags.yes)
+    if err != nil {
         return err
     }

     var idFile clusterid.File
-    if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil {
+    if err := u.fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil {
         return fmt.Errorf("reading cluster ID file: %w", err)
     }
+    if idFile.MeasurementSalt == nil {
+        // TODO(elchead): remove after 2.10, since 2.9 does not yet save it in the idfile
+        measurementSalt, err := u.upgrader.GetMeasurementSalt(cmd.Context())
+        if err != nil {
+            return fmt.Errorf("getting join-config: %w", err)
+        }
+        idFile.MeasurementSalt = measurementSalt
+        if err := u.fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptOverwrite); err != nil {
+            return fmt.Errorf("writing cluster ID file: %w", err)
+        }
+    }
     conf.UpdateMAAURL(idFile.AttestationURL)

-    // If an image upgrade was just executed there won't be a diff. The function will return nil in that case.
-    stableClient, err := stableClientFactory(constants.AdminConfFilename)
-    if err != nil {
-        return fmt.Errorf("creating stable client: %w", err)
-    }
-    if err := u.upgradeAttestConfigIfDiff(cmd, stableClient, conf.GetAttestationConfig(), flags); err != nil {
+    if err := u.confirmIfUpgradeAttestConfigHasDiff(cmd, conf.GetAttestationConfig(), flags); err != nil {
         return fmt.Errorf("upgrading measurements: %w", err)
     }
     // not moving existing Terraform migrator because of planned apply refactor
-    if err := u.migrateTerraform(cmd, u.imageFetcher, conf, fileHandler, flags); err != nil {
+    tfOutput, err := u.migrateTerraform(cmd, u.imageFetcher, conf, flags)
+    if err != nil {
         return fmt.Errorf("performing Terraform migrations: %w", err)
     }
     // reload idFile after terraform migration
     // it might have been updated by the migration
-    if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil {
+    if err := u.fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil {
         return fmt.Errorf("reading updated cluster ID file: %w", err)
     }
@@ -164,10 +170,12 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, fileHandler file.Hand

     if conf.GetProvider() == cloudprovider.Azure || conf.GetProvider() == cloudprovider.GCP || conf.GetProvider() == cloudprovider.AWS {
         var upgradeErr *compatibility.InvalidUpgradeError
-        err = u.handleServiceUpgrade(cmd, conf, idFile, flags)
+        err = u.handleServiceUpgrade(cmd, conf, idFile, tfOutput, validK8sVersion, flags)
         switch {
         case errors.As(err, &upgradeErr):
             cmd.PrintErrln(err)
         case err == nil:
             cmd.Println("Successfully upgraded Constellation services.")
         case err != nil:
             return fmt.Errorf("upgrading services: %w", err)
         }
@@ -213,22 +221,22 @@ func getImage(ctx context.Context, conf *config.Config, fetcher imageFetcher) (s
 // migrateTerraform checks if the Constellation version the cluster is being upgraded to requires a migration
 // of cloud resources with Terraform. If so, the migration is performed.
 func (u *upgradeApplyCmd) migrateTerraform(
-    cmd *cobra.Command, fetcher imageFetcher, conf *config.Config, fileHandler file.Handler, flags upgradeApplyFlags,
-) error {
+    cmd *cobra.Command, fetcher imageFetcher, conf *config.Config, flags upgradeApplyFlags,
+) (res terraform.ApplyOutput, err error) {
     u.log.Debugf("Planning Terraform migrations")

     if err := u.upgrader.CheckTerraformMigrations(constants.UpgradeDir); err != nil {
-        return fmt.Errorf("checking workspace: %w", err)
+        return res, fmt.Errorf("checking workspace: %w", err)
     }

     imageRef, err := getImage(cmd.Context(), conf, fetcher)
     if err != nil {
-        return fmt.Errorf("fetching image reference: %w", err)
+        return res, fmt.Errorf("fetching image reference: %w", err)
     }

     vars, err := cloudcmd.TerraformUpgradeVars(conf, imageRef)
     if err != nil {
-        return fmt.Errorf("parsing upgrade variables: %w", err)
+        return res, fmt.Errorf("parsing upgrade variables: %w", err)
     }
     u.log.Debugf("Using Terraform variables:\n%v", vars)
@@ -252,7 +260,7 @@ func (u *upgradeApplyCmd) migrateTerraform(

     hasDiff, err := u.upgrader.PlanTerraformMigrations(cmd.Context(), opts)
     if err != nil {
-        return fmt.Errorf("planning terraform migrations: %w", err)
+        return res, fmt.Errorf("planning terraform migrations: %w", err)
     }

     if hasDiff {
@@ -261,24 +269,26 @@ func (u *upgradeApplyCmd) migrateTerraform(
         if !flags.yes {
             ok, err := askToConfirm(cmd, "Do you want to apply the Terraform migrations?")
             if err != nil {
-                return fmt.Errorf("asking for confirmation: %w", err)
+                return res, fmt.Errorf("asking for confirmation: %w", err)
             }
             if !ok {
                 cmd.Println("Aborting upgrade.")
                 if err := u.upgrader.CleanUpTerraformMigrations(constants.UpgradeDir); err != nil {
-                    return fmt.Errorf("cleaning up workspace: %w", err)
+                    return res, fmt.Errorf("cleaning up workspace: %w", err)
                 }
-                return fmt.Errorf("aborted by user")
+                return res, fmt.Errorf("aborted by user")
             }
         }

         u.log.Debugf("Applying Terraform migrations")
-        newIDFile, err := u.upgrader.ApplyTerraformMigrations(cmd.Context(), opts)
+        tfOutput, err := u.upgrader.ApplyTerraformMigrations(cmd.Context(), opts)
         if err != nil {
-            return fmt.Errorf("applying terraform migrations: %w", err)
+            return tfOutput, fmt.Errorf("applying terraform migrations: %w", err)
         }
-        if err := mergeClusterIDFile(constants.ClusterIDsFilename, newIDFile, fileHandler); err != nil {
-            return fmt.Errorf("merging cluster ID files: %w", err)
+
+        // Patch MAA policy if we applied an Azure upgrade.
+        newIDFile := newIDFile(opts, tfOutput)
+        if err := mergeClusterIDFile(constants.ClusterIDsFilename, newIDFile, u.fileHandler); err != nil {
+            return tfOutput, fmt.Errorf("merging cluster ID files: %w", err)
         }

         cmd.Printf("Terraform migrations applied successfully and output written to: %s\n"+
@@ -287,26 +297,47 @@ func (u *upgradeApplyCmd) migrateTerraform(
-    } else {
-        u.log.Debugf("No Terraform diff detected")
-    }
-
-    return nil
+    }
+    u.log.Debugf("No Terraform diff detected")
+    tfOutput, err := u.clusterShower.ShowCluster(cmd.Context(), conf.GetProvider())
+    if err != nil {
+        return tfOutput, fmt.Errorf("getting Terraform output: %w", err)
+    }
+    return tfOutput, nil
 }

-// handleInvalidK8sPatchVersion checks if the Kubernetes patch version is supported and asks for confirmation if not.
-func handleInvalidK8sPatchVersion(cmd *cobra.Command, version string, yes bool) error {
-    _, err := versions.NewValidK8sVersion(version, true)
+func newIDFile(opts upgrade.TerraformUpgradeOptions, tfOutput terraform.ApplyOutput) clusterid.File {
+    newIDFile := clusterid.File{
+        CloudProvider:     opts.CSP,
+        InitSecret:        []byte(tfOutput.Secret),
+        IP:                tfOutput.IP,
+        APIServerCertSANs: tfOutput.APIServerCertSANs,
+        UID:               tfOutput.UID,
+    }
+    if tfOutput.Azure != nil {
+        newIDFile.AttestationURL = tfOutput.Azure.AttestationURL
+    }
+    return newIDFile
+}
+
+// validK8sVersion checks if the Kubernetes patch version is supported and asks for confirmation if not.
+func validK8sVersion(cmd *cobra.Command, version string, yes bool) (validVersion versions.ValidK8sVersion, err error) {
+    validVersion, err = versions.NewValidK8sVersion(version, true)
+    if versions.IsPreviewK8sVersion(validVersion) {
+        cmd.PrintErrf("Warning: Constellation with Kubernetes %v is still in preview. Use only for evaluation purposes.\n", validVersion)
+    }
     valid := err == nil

     if !valid && !yes {
         confirmed, err := askToConfirm(cmd, fmt.Sprintf("WARNING: The Kubernetes patch version %s is not supported. If you continue, Kubernetes upgrades will be skipped. Do you want to continue anyway?", version))
         if err != nil {
-            return fmt.Errorf("asking for confirmation: %w", err)
+            return validVersion, fmt.Errorf("asking for confirmation: %w", err)
         }
         if !confirmed {
-            return fmt.Errorf("aborted by user")
+            return validVersion, fmt.Errorf("aborted by user")
         }
     }

-    return nil
+    return validVersion, nil
 }

 type imageFetcher interface {
@@ -316,13 +347,14 @@ type imageFetcher interface {
     ) (string, error)
 }

-// upgradeAttestConfigIfDiff checks if the locally configured measurements are different from the cluster's measurements.
-// If so the function will ask the user to confirm (if --yes is not set) and upgrade the measurements only.
-func (u *upgradeApplyCmd) upgradeAttestConfigIfDiff(cmd *cobra.Command, stableClient getConfigMapper, newConfig config.AttestationCfg, flags upgradeApplyFlags) error {
-    clusterAttestationConfig, err := getAttestationConfig(cmd.Context(), stableClient, newConfig.GetVariant())
+// confirmIfUpgradeAttestConfigHasDiff checks if the locally configured measurements are different from the cluster's measurements.
+// If so the function will ask the user to confirm (if --yes is not set).
+func (u *upgradeApplyCmd) confirmIfUpgradeAttestConfigHasDiff(cmd *cobra.Command, newConfig config.AttestationCfg, flags upgradeApplyFlags) error {
+    clusterAttestationConfig, err := u.upgrader.GetClusterAttestationConfig(cmd.Context(), newConfig.GetVariant())
     if err != nil {
         return fmt.Errorf("getting cluster attestation config: %w", err)
     }

     // If the current config is equal, or there is an error when comparing the configs, we skip the upgrade.
     equal, err := newConfig.EqualTo(clusterAttestationConfig)
     if err != nil {
@@ -331,32 +363,39 @@ func (u *upgradeApplyCmd) upgradeAttestConfigIfDiff(cmd *cobra.Command, stableCl
     if equal {
         return nil
     }

-    cmd.Println("The configured attestation config is different from the attestation config in the cluster.")
-    diffStr, err := diffAttestationCfg(clusterAttestationConfig, newConfig)
-    if err != nil {
-        return fmt.Errorf("diffing attestation configs: %w", err)
-    }
-    cmd.Println("The following changes will be applied to the attestation config:")
-    cmd.Println(diffStr)
     if !flags.yes {
+        cmd.Println("The configured attestation config is different from the attestation config in the cluster.")
+        diffStr, err := diffAttestationCfg(clusterAttestationConfig, newConfig)
+        if err != nil {
+            return fmt.Errorf("diffing attestation configs: %w", err)
+        }
+        cmd.Println("The following changes will be applied to the attestation config:")
+        cmd.Println(diffStr)
         ok, err := askToConfirm(cmd, "Are you sure you want to change your cluster's attestation config?")
         if err != nil {
             return fmt.Errorf("asking for confirmation: %w", err)
         }
         if !ok {
-            cmd.Println("Skipping upgrade.")
-            return nil
+            return errors.New("aborting upgrade since attestation config is different")
         }
     }
-    if err := u.upgrader.UpdateAttestationConfig(cmd.Context(), newConfig); err != nil {
-        return fmt.Errorf("updating attestation config: %w", err)
+    // TODO(elchead): move this outside this function to remove the side effect.
+    if err := u.upgrader.BackupConfigMap(cmd.Context(), constants.JoinConfigMap); err != nil {
+        return fmt.Errorf("backing up join-config: %w", err)
     }
     return nil
 }

-func (u *upgradeApplyCmd) handleServiceUpgrade(cmd *cobra.Command, conf *config.Config, idFile clusterid.File, flags upgradeApplyFlags) error {
-    err := u.upgrader.UpgradeHelmServices(cmd.Context(), conf, idFile, flags.upgradeTimeout, helm.DenyDestructive, flags.force)
+func (u *upgradeApplyCmd) handleServiceUpgrade(cmd *cobra.Command, conf *config.Config, idFile clusterid.File, tfOutput terraform.ApplyOutput, validK8sVersion versions.ValidK8sVersion, flags upgradeApplyFlags) error {
+    var secret uri.MasterSecret
+    if err := u.fileHandler.ReadJSON(flags.pf.PrefixPath(constants.MasterSecretFilename), &secret); err != nil {
+        return fmt.Errorf("reading master secret: %w", err)
+    }
+    serviceAccURI, err := cloudcmd.GetMarshaledServiceAccountURI(conf.GetProvider(), conf, flags.pf, u.log, u.fileHandler)
+    if err != nil {
+        return fmt.Errorf("getting service account URI: %w", err)
+    }
+    err = u.upgrader.UpgradeHelmServices(cmd.Context(), conf, idFile, flags.upgradeTimeout, helm.DenyDestructive, flags.force, flags.conformance, flags.helmWaitMode, secret, serviceAccURI, validK8sVersion, tfOutput)
     if errors.Is(err, helm.ErrConfirmationMissing) {
         if !flags.yes {
             cmd.PrintErrln("WARNING: Upgrading cert-manager will destroy all custom resources you have manually created that are based on the current version of cert-manager.")
@@ -369,7 +408,7 @@ func (u *upgradeApplyCmd) handleServiceUpgrade(cmd *cobra.Command, conf *config.
                 return nil
             }
         }
-        err = u.upgrader.UpgradeHelmServices(cmd.Context(), conf, idFile, flags.upgradeTimeout, helm.AllowDestructive, flags.force)
+        err = u.upgrader.UpgradeHelmServices(cmd.Context(), conf, idFile, flags.upgradeTimeout, helm.AllowDestructive, flags.force, flags.conformance, flags.helmWaitMode, secret, serviceAccURI, validK8sVersion, tfOutput)
     }

     return err
@@ -405,12 +444,26 @@ func parseUpgradeApplyFlags(cmd *cobra.Command) (upgradeApplyFlags, error) {
         return upgradeApplyFlags{}, fmt.Errorf("parsing Terraform log level %s: %w", logLevelString, err)
     }

+    conformance, err := cmd.Flags().GetBool("conformance")
+    if err != nil {
+        return upgradeApplyFlags{}, fmt.Errorf("parsing conformance flag: %w", err)
+    }
+    skipHelmWait, err := cmd.Flags().GetBool("skip-helm-wait")
+    if err != nil {
+        return upgradeApplyFlags{}, fmt.Errorf("parsing skip-helm-wait flag: %w", err)
+    }
+    helmWaitMode := helm.WaitModeAtomic
+    if skipHelmWait {
+        helmWaitMode = helm.WaitModeNone
+    }
     return upgradeApplyFlags{
         pf:                pathprefix.New(workDir),
         yes:               yes,
         upgradeTimeout:    timeout,
         force:             force,
         terraformLogLevel: logLevel,
+        conformance:       conformance,
+        helmWaitMode:      helmWaitMode,
     }, nil
 }
@@ -433,17 +486,20 @@ type upgradeApplyFlags struct {
     upgradeTimeout    time.Duration
     force             bool
     terraformLogLevel terraform.LogLevel
+    conformance       bool
+    helmWaitMode      helm.WaitMode
 }

 type cloudUpgrader interface {
     UpgradeNodeVersion(ctx context.Context, conf *config.Config, force bool) error
-    UpgradeHelmServices(ctx context.Context, config *config.Config, idFile clusterid.File, timeout time.Duration, allowDestructive bool, force bool) error
-    UpdateAttestationConfig(ctx context.Context, newConfig config.AttestationCfg) error
+    UpgradeHelmServices(ctx context.Context, config *config.Config, idFile clusterid.File, timeout time.Duration, allowDestructive bool, force bool, conformance bool, helmWaitMode helm.WaitMode, masterSecret uri.MasterSecret, serviceAccURI string, validK8sVersion versions.ValidK8sVersion, tfOutput terraform.ApplyOutput) error
     ExtendClusterConfigCertSANs(ctx context.Context, alternativeNames []string) error
-    GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, *corev1.ConfigMap, error)
+    GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error)
+    GetMeasurementSalt(ctx context.Context) ([]byte, error)
     PlanTerraformMigrations(ctx context.Context, opts upgrade.TerraformUpgradeOptions) (bool, error)
-    ApplyTerraformMigrations(ctx context.Context, opts upgrade.TerraformUpgradeOptions) (clusterid.File, error)
+    ApplyTerraformMigrations(ctx context.Context, opts upgrade.TerraformUpgradeOptions) (terraform.ApplyOutput, error)
     CheckTerraformMigrations(upgradeWorkspace string) error
     CleanUpTerraformMigrations(upgradeWorkspace string) error
     GetUpgradeID() string
+    BackupConfigMap(ctx context.Context, name string) error
 }
@@ -9,41 +9,44 @@ package cmd
 import (
     "bytes"
     "context"
     "encoding/json"
     "errors"
     "testing"
     "time"

     "github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
     "github.com/edgelesssys/constellation/v2/cli/internal/helm"
     "github.com/edgelesssys/constellation/v2/cli/internal/kubernetes"
+    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
     "github.com/edgelesssys/constellation/v2/cli/internal/upgrade"
     "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
     "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
     "github.com/edgelesssys/constellation/v2/internal/config"
     "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/edgelesssys/constellation/v2/internal/file"
+    "github.com/edgelesssys/constellation/v2/internal/kms/uri"
     "github.com/edgelesssys/constellation/v2/internal/logger"
+    "github.com/edgelesssys/constellation/v2/internal/versions"
     "github.com/spf13/afero"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
-    corev1 "k8s.io/api/core/v1"
 )

 func TestUpgradeApply(t *testing.T) {
     someErr := errors.New("some error")
     testCases := map[string]struct {
-        upgrader stubUpgrader
-        fetcher  stubImageFetcher
-        wantErr  bool
-        yesFlag  bool
-        stdin    string
+        upgrader                 *stubUpgrader
+        fetcher                  stubImageFetcher
+        wantErr                  bool
+        yesFlag                  bool
+        dontWantJoinConfigBackup bool
+        stdin                    string
     }{
         "success": {
-            upgrader: stubUpgrader{currentConfig: config.DefaultForAzureSEVSNP()},
+            upgrader: &stubUpgrader{currentConfig: config.DefaultForAzureSEVSNP()},
             yesFlag:  true,
         },
         "nodeVersion some error": {
-            upgrader: stubUpgrader{
+            upgrader: &stubUpgrader{
                 currentConfig:  config.DefaultForAzureSEVSNP(),
                 nodeVersionErr: someErr,
             },
@@ -51,14 +54,14 @@ func TestUpgradeApply(t *testing.T) {
             yesFlag: true,
         },
         "nodeVersion in progress error": {
-            upgrader: stubUpgrader{
+            upgrader: &stubUpgrader{
                 currentConfig:  config.DefaultForAzureSEVSNP(),
                 nodeVersionErr: kubernetes.ErrInProgress,
             },
             yesFlag: true,
         },
         "helm other error": {
-            upgrader: stubUpgrader{
+            upgrader: &stubUpgrader{
                 currentConfig: config.DefaultForAzureSEVSNP(),
                 helmErr:       someErr,
             },
@@ -67,7 +70,7 @@ func TestUpgradeApply(t *testing.T) {
            yesFlag: true,
         },
         "check terraform error": {
-            upgrader: stubUpgrader{
+            upgrader: &stubUpgrader{
                 currentConfig:     config.DefaultForAzureSEVSNP(),
                 checkTerraformErr: someErr,
             },
@@ -76,7 +79,7 @@ func TestUpgradeApply(t *testing.T) {
             yesFlag: true,
         },
         "abort": {
-            upgrader: stubUpgrader{
+            upgrader: &stubUpgrader{
                 currentConfig: config.DefaultForAzureSEVSNP(),
                 terraformDiff: true,
             },
@@ -85,7 +88,7 @@ func TestUpgradeApply(t *testing.T) {
             stdin: "no\n",
         },
         "clean terraform error": {
-            upgrader: stubUpgrader{
+            upgrader: &stubUpgrader{
                 currentConfig:     config.DefaultForAzureSEVSNP(),
                 cleanTerraformErr: someErr,
                 terraformDiff:     true,
@@ -95,7 +98,7 @@ func TestUpgradeApply(t *testing.T) {
             stdin: "no\n",
         },
         "plan terraform error": {
-            upgrader: stubUpgrader{
+            upgrader: &stubUpgrader{
                 currentConfig:    config.DefaultForAzureSEVSNP(),
                 planTerraformErr: someErr,
             },
@@ -104,7 +107,7 @@ func TestUpgradeApply(t *testing.T) {
             yesFlag: true,
         },
         "apply terraform error": {
-            upgrader: stubUpgrader{
+            upgrader: &stubUpgrader{
                 currentConfig:     config.DefaultForAzureSEVSNP(),
                 applyTerraformErr: someErr,
                 terraformDiff:     true,
@@ -114,13 +117,21 @@ func TestUpgradeApply(t *testing.T) {
             yesFlag: true,
         },
         "fetch reference error": {
-            upgrader: stubUpgrader{
+            upgrader: &stubUpgrader{
                 currentConfig: config.DefaultForAzureSEVSNP(),
             },
             fetcher: stubImageFetcher{fetchReferenceErr: someErr},
             wantErr: true,
             yesFlag: true,
         },
+        "do no backup join-config when remote attestation config is the same": {
+            upgrader: &stubUpgrader{
+                currentConfig: fakeAzureAttestationConfigFromCluster(context.Background(), t, cloudprovider.Azure),
+            },
+            fetcher:                  stubImageFetcher{},
+            yesFlag:                  true,
+            dontWantJoinConfigBackup: true,
+        },
     }

     for name, tc := range testCases {
@@ -139,43 +150,26 @@ func TestUpgradeApply(t *testing.T) {
             }

             handler := file.NewHandler(afero.NewMemMapFs())
-            cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), cloudprovider.Azure)
-
-            remoteAttestationCfg := fakeAttestationConfigFromCluster(cmd.Context(), t, cloudprovider.Azure)
+            cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), cloudprovider.Azure)

             require.NoError(handler.WriteYAML(constants.ConfigFilename, cfg))
             require.NoError(handler.WriteJSON(constants.ClusterIDsFilename, clusterid.File{}))
+            require.NoError(handler.WriteJSON(constants.MasterSecretFilename, uri.MasterSecret{}))

-            upgrader := upgradeApplyCmd{upgrader: tc.upgrader, log: logger.NewTest(t), imageFetcher: tc.fetcher, configFetcher: stubAttestationFetcher{}}
-
-            stubStableClientFactory := func(_ string) (getConfigMapper, error) {
-                return stubGetConfigMap{remoteAttestationCfg}, nil
-            }
-            err := upgrader.upgradeApply(cmd, handler, stubStableClientFactory)
+            upgrader := upgradeApplyCmd{upgrader: tc.upgrader, log: logger.NewTest(t), imageFetcher: tc.fetcher, configFetcher: stubAttestationFetcher{}, clusterShower: &stubShowCluster{}, fileHandler: handler}
+
+            err := upgrader.upgradeApply(cmd)
             if tc.wantErr {
                 assert.Error(err)
-            } else {
-                assert.NoError(err)
+                return
             }
+            assert.NoError(err)
+            assert.Equal(!tc.dontWantJoinConfigBackup, tc.upgrader.backupWasCalled)
         })
     }
 }

-type stubGetConfigMap struct {
-    attestationCfg config.AttestationCfg
-}
-
-func (s stubGetConfigMap) GetCurrentConfigMap(_ context.Context, _ string) (*corev1.ConfigMap, error) {
-    data, err := json.Marshal(s.attestationCfg)
-    if err != nil {
-        return nil, err
-    }
-    dataMap := map[string]string{
-        constants.AttestationConfigFilename: string(data),
-    }
-    return &corev1.ConfigMap{Data: dataMap}, nil
-}

 type stubUpgrader struct {
     currentConfig  config.AttestationCfg
     nodeVersionErr error
@@ -185,17 +179,27 @@ type stubUpgrader struct {
     checkTerraformErr error
     applyTerraformErr error
     cleanTerraformErr error
+    backupWasCalled   bool
 }

+func (u stubUpgrader) GetMeasurementSalt(_ context.Context) ([]byte, error) {
+    return []byte{}, nil
+}
+
 func (u stubUpgrader) GetUpgradeID() string {
     return "test-upgrade"
 }

+func (u *stubUpgrader) BackupConfigMap(_ context.Context, _ string) error {
+    u.backupWasCalled = true
+    return nil
+}
+
 func (u stubUpgrader) UpgradeNodeVersion(_ context.Context, _ *config.Config, _ bool) error {
     return u.nodeVersionErr
 }

-func (u stubUpgrader) UpgradeHelmServices(_ context.Context, _ *config.Config, _ clusterid.File, _ time.Duration, _, _ bool) error {
+func (u stubUpgrader) UpgradeHelmServices(_ context.Context, _ *config.Config, _ clusterid.File, _ time.Duration, _, _, _ bool, _ helm.WaitMode, _ uri.MasterSecret, _ string, _ versions.ValidK8sVersion, _ terraform.ApplyOutput) error {
     return u.helmErr
 }
@@ -203,8 +207,8 @@ func (u stubUpgrader) UpdateAttestationConfig(_ context.Context, _ config.Attest
     return nil
 }

-func (u stubUpgrader) GetClusterAttestationConfig(_ context.Context, _ variant.Variant) (config.AttestationCfg, *corev1.ConfigMap, error) {
-    return u.currentConfig, &corev1.ConfigMap{}, nil
+func (u stubUpgrader) GetClusterAttestationConfig(_ context.Context, _ variant.Variant) (config.AttestationCfg, error) {
+    return u.currentConfig, nil
 }

 func (u stubUpgrader) CheckTerraformMigrations(_ string) error {
@@ -219,8 +223,8 @@ func (u stubUpgrader) PlanTerraformMigrations(context.Context, upgrade.Terraform
     return u.terraformDiff, u.planTerraformErr
 }

-func (u stubUpgrader) ApplyTerraformMigrations(context.Context, upgrade.TerraformUpgradeOptions) (clusterid.File, error) {
-    return clusterid.File{}, u.applyTerraformErr
+func (u stubUpgrader) ApplyTerraformMigrations(context.Context, upgrade.TerraformUpgradeOptions) (terraform.ApplyOutput, error) {
+    return terraform.ApplyOutput{}, u.applyTerraformErr
 }

 func (u stubUpgrader) ExtendClusterConfigCertSANs(_ context.Context, _ []string) error {
@@ -238,7 +242,7 @@ func (f stubImageFetcher) FetchReference(_ context.Context,
     return "", f.fetchReferenceErr
 }

-func fakeAttestationConfigFromCluster(ctx context.Context, t *testing.T, provider cloudprovider.Provider) config.AttestationCfg {
+func fakeAzureAttestationConfigFromCluster(ctx context.Context, t *testing.T, provider cloudprovider.Provider) config.AttestationCfg {
     cpCfg := defaultConfigWithExpectedMeasurements(t, config.Default(), provider)
     // the cluster attestation config needs to have real version numbers that are translated from "latest" as defined in config.Default()
     err := cpCfg.Attestation.AzureSEVSNP.FetchAndSetLatestVersionNumbers(ctx, stubAttestationFetcher{}, time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC))
@@ -109,7 +109,7 @@ func NewLoader(csp cloudprovider.Provider, k8sVersion versions.ValidK8sVersion,
 // LoadReleases loads the embedded helm charts and returns them as a HelmReleases object.
 func (i *ChartLoader) LoadReleases(
     config *config.Config, conformanceMode bool, helmWaitMode WaitMode, masterSecret uri.MasterSecret,
-    measurementSalt []byte, serviceAccURI string, idFile clusterid.File, output terraform.ApplyOutput,
+    serviceAccURI string, idFile clusterid.File, output terraform.ApplyOutput,
 ) (*Releases, error) {
     ciliumRelease, err := i.loadRelease(ciliumInfo, helmWaitMode)
     if err != nil {
@@ -133,7 +133,11 @@ func (i *ChartLoader) LoadReleases(
     if err != nil {
         return nil, fmt.Errorf("loading constellation-services: %w", err)
     }
-    svcVals, err := extraConstellationServicesValues(config, masterSecret, measurementSalt, idFile.UID, serviceAccURI, output)
+
+    if idFile.MeasurementSalt == nil {
+        return nil, errors.New("missing measurement salt in idFile")
+    }
+    svcVals, err := extraConstellationServicesValues(config, masterSecret, idFile.MeasurementSalt, idFile.UID, serviceAccURI, output)
     if err != nil {
         return nil, fmt.Errorf("extending constellation-services values: %w", err)
     }
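Since the measurement salt now travels inside the cluster ID file rather than as its own parameter, callers must populate idFile.MeasurementSalt before calling LoadReleases or they hit the error above. A sketch under that assumption (variable names borrowed from the call sites elsewhere in this diff):

    idFile.MeasurementSalt = measurementSalt // set during init, or read back from the join-config on upgrade
    releases, err := helmLoader.LoadReleases(conf, conformance, helmWaitMode, masterSecret, serviceAccURI, idFile, tfOutput)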
@@ -70,8 +70,8 @@ func TestLoadReleases(t *testing.T) {
     chartLoader := ChartLoader{csp: config.GetProvider()}
     helmReleases, err := chartLoader.LoadReleases(
         config, true, WaitModeAtomic,
-        uri.MasterSecret{Key: []byte("secret"), Salt: []byte("salt")}, []byte("salt"),
-        fakeServiceAccURI(cloudprovider.GCP), clusterid.File{UID: "testuid"}, terraform.ApplyOutput{GCP: &terraform.GCPApplyOutput{}},
+        uri.MasterSecret{Key: []byte("secret"), Salt: []byte("masterSalt")},
+        fakeServiceAccURI(cloudprovider.GCP), clusterid.File{UID: "testuid", MeasurementSalt: []byte("measurementSalt")}, terraform.ApplyOutput{GCP: &terraform.GCPApplyOutput{}},
     )
     require.NoError(err)
     chart := helmReleases.ConstellationServices.Chart
@@ -13,20 +13,21 @@ import (
     "strings"
     "time"

-    "helm.sh/helm/v3/pkg/action"
-    "helm.sh/helm/v3/pkg/chart"
-    "helm.sh/helm/v3/pkg/cli"
-    "helm.sh/helm/v3/pkg/release"
-
     "github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
+    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
     "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
     "github.com/edgelesssys/constellation/v2/internal/compatibility"
     "github.com/edgelesssys/constellation/v2/internal/config"
     "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/edgelesssys/constellation/v2/internal/file"
+    "github.com/edgelesssys/constellation/v2/internal/kms/uri"
     "github.com/edgelesssys/constellation/v2/internal/semver"
     "github.com/edgelesssys/constellation/v2/internal/versions"
     "github.com/spf13/afero"
+    "helm.sh/helm/v3/pkg/action"
+    "helm.sh/helm/v3/pkg/chart"
+    "helm.sh/helm/v3/pkg/cli"
+    "helm.sh/helm/v3/pkg/release"
     apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/runtime/schema"
@@ -113,53 +114,58 @@ func (c *UpgradeClient) shouldUpgrade(releaseName string, newVersion semver.Semv
 // Upgrade runs a helm-upgrade on all deployments that are managed via Helm.
 // If the CLI receives an interrupt signal it will cancel the context.
 // Canceling the context will prompt helm to abort and roll back the ongoing upgrade.
-func (c *UpgradeClient) Upgrade(ctx context.Context, config *config.Config, idFile clusterid.File, timeout time.Duration, allowDestructive, force bool, upgradeID string) error {
+func (c *UpgradeClient) Upgrade(ctx context.Context, config *config.Config, idFile clusterid.File, timeout time.Duration,
+    allowDestructive, force bool, upgradeID string, conformance bool, helmWaitMode WaitMode, masterSecret uri.MasterSecret,
+    serviceAccURI string, validK8sVersion versions.ValidK8sVersion, output terraform.ApplyOutput,
+) error {
     upgradeErrs := []error{}
-    upgradeReleases := []*chart.Chart{}
-    newReleases := []*chart.Chart{}
-    for _, info := range getManagedCharts(config) {
-        c.log.Debugf("Checking release %s", info.releaseName)
-        chart, err := loadChartsDir(helmFS, info.path)
-        if err != nil {
-            return fmt.Errorf("loading chart: %w", err)
-        }
+    upgradeReleases := []Release{}
+    newReleases := []Release{}
+
+    clusterName := clusterid.GetClusterName(config, idFile)
+    helmLoader := NewLoader(config.GetProvider(), validK8sVersion, clusterName)
+    c.log.Debugf("Created new Helm loader")
+    releases, err := helmLoader.LoadReleases(config, conformance, helmWaitMode, masterSecret, serviceAccURI, idFile, output)
+    if err != nil {
+        return fmt.Errorf("loading releases: %w", err)
+    }
+    for _, release := range getManagedReleases(config, releases) {
+        var invalidUpgrade *compatibility.InvalidUpgradeError
         // Get version of the chart embedded in the CLI
         // This is the version we are upgrading to
         // Since our bundled charts are embedded with version 0.0.0,
         // we need to update them to the same version as the CLI
         var upgradeVersion semver.Semver
-        if isCLIVersionedRelease(info.releaseName) {
-            updateVersions(chart, constants.BinaryVersion())
+        if isCLIVersionedRelease(release.ReleaseName) {
+            updateVersions(release.Chart, constants.BinaryVersion())
             upgradeVersion = config.MicroserviceVersion
         } else {
-            chartVersion, err := semver.New(chart.Metadata.Version)
+            chartVersion, err := semver.New(release.Chart.Metadata.Version)
             if err != nil {
                 return fmt.Errorf("parsing chart version: %w", err)
             }
             upgradeVersion = chartVersion
         }

-        var invalidUpgrade *compatibility.InvalidUpgradeError
-        err = c.shouldUpgrade(info.releaseName, upgradeVersion, force)
+        err = c.shouldUpgrade(release.ReleaseName, upgradeVersion, force)
         switch {
         case errors.Is(err, errReleaseNotFound):
             // if the release is not found, we need to install it
-            c.log.Debugf("Release %s not found, adding to new releases...", info.releaseName)
-            newReleases = append(newReleases, chart)
+            c.log.Debugf("Release %s not found, adding to new releases...", release.ReleaseName)
+            newReleases = append(newReleases, release)
         case errors.As(err, &invalidUpgrade):
-            upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping %s upgrade: %w", info.releaseName, err))
+            c.log.Debugf("Appending to %s upgrade: %s", release.ReleaseName, err)
+            upgradeReleases = append(upgradeReleases, release)
         case err != nil:
-            c.log.Debugf("Adding %s to upgrade releases...", info.releaseName)
-            return fmt.Errorf("should upgrade %s: %w", info.releaseName, err)
+            c.log.Debugf("Adding %s to upgrade releases...", release.ReleaseName)
+            return fmt.Errorf("should upgrade %s: %w", release.ReleaseName, err)
         case err == nil:
-            upgradeReleases = append(upgradeReleases, chart)
+            upgradeReleases = append(upgradeReleases, release)

             // Check if installing/upgrading the chart could be destructive
             // If so, we don't want to perform any actions,
             // unless the user confirms it to be OK.
             if !allowDestructive &&
-                info.chartName == certManagerInfo.chartName {
+                release.ReleaseName == certManagerInfo.releaseName {
                 return ErrConfirmationMissing
             }
         }
@@ -177,10 +183,15 @@ func (c *UpgradeClient) Upgrade(ctx context.Context, config *config.Config, idFi
         }
     }

-    for _, chart := range upgradeReleases {
-        c.log.Debugf("Upgrading release %s", chart.Metadata.Name)
-        if err := c.upgradeRelease(ctx, timeout, config, idFile, chart); err != nil {
-            return fmt.Errorf("upgrading %s: %w", chart.Metadata.Name, err)
+    for _, release := range upgradeReleases {
+        c.log.Debugf("Upgrading release %s", release.Chart.Metadata.Name)
+        if release.ReleaseName == constellationOperatorsInfo.releaseName {
+            if err := c.updateCRDs(ctx, release.Chart); err != nil {
+                return fmt.Errorf("updating operator CRDs: %w", err)
+            }
+        }
+        if err := c.upgradeRelease(ctx, timeout, release); err != nil {
+            return fmt.Errorf("upgrading %s: %w", release.Chart.Metadata.Name, err)
         }
     }
@@ -190,25 +201,26 @@ func (c *UpgradeClient) Upgrade(ctx context.Context, config *config.Config, idFi
     // that the new release is installed without creating naming conflicts.
     // If in the future, we require to install a new release before upgrading existing ones,
     // it should be done in a separate loop, instead of moving this one up.
-    for _, chart := range newReleases {
-        c.log.Debugf("Installing new release %s", chart.Metadata.Name)
-        if err := c.installNewRelease(ctx, timeout, config, idFile, chart); err != nil {
-            return fmt.Errorf("upgrading %s: %w", chart.Metadata.Name, err)
+    for _, release := range newReleases {
+        c.log.Debugf("Installing new release %s", release.Chart.Metadata.Name)
+        if err := c.installNewRelease(ctx, timeout, release); err != nil {
+            return fmt.Errorf("upgrading %s: %w", release.Chart.Metadata.Name, err)
         }
     }

     return errors.Join(upgradeErrs...)
 }

-func getManagedCharts(config *config.Config) []chartInfo {
-    charts := []chartInfo{ciliumInfo, certManagerInfo, constellationOperatorsInfo, constellationServicesInfo}
+func getManagedReleases(config *config.Config, releases *Releases) []Release {
+    res := []Release{releases.Cilium, releases.CertManager, releases.ConstellationOperators, releases.ConstellationServices}

     if config.GetProvider() == cloudprovider.AWS {
-        charts = append(charts, awsLBControllerInfo)
+        res = append(res, *releases.AWSLoadBalancerController)
     }
     if config.DeployCSIDriver() {
-        charts = append(charts, csiInfo)
+        res = append(res, *releases.CSI)
     }
-    return charts
+    return res
 }

 // Versions queries the cluster for running versions and returns a map of releaseName -> version.
@ -306,130 +318,16 @@ func (c *UpgradeClient) csiVersions() (map[string]semver.Semver, error) {
|
||||
|
||||
// installNewRelease installs a previously not installed release on the cluster.
|
||||
func (c *UpgradeClient) installNewRelease(
|
||||
ctx context.Context, timeout time.Duration, conf *config.Config, idFile clusterid.File, chart *chart.Chart,
|
||||
ctx context.Context, timeout time.Duration, release Release,
|
||||
) error {
|
||||
releaseName, values, err := c.loadUpgradeValues(ctx, conf, idFile, chart)
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading values: %w", err)
|
||||
}
|
||||
return c.actions.installAction(ctx, releaseName, chart, values, timeout)
|
||||
return c.actions.installAction(ctx, release.ReleaseName, release.Chart, release.Values, timeout)
|
||||
}
|
||||
|
||||
// upgradeRelease upgrades a release running on the cluster.
|
||||
func (c *UpgradeClient) upgradeRelease(
|
||||
ctx context.Context, timeout time.Duration, conf *config.Config, idFile clusterid.File, chart *chart.Chart,
|
||||
ctx context.Context, timeout time.Duration, release Release,
|
||||
) error {
|
||||
releaseName, values, err := c.loadUpgradeValues(ctx, conf, idFile, chart)
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading values: %w", err)
|
||||
}
|
||||
|
||||
values, err = c.mergeClusterValues(values, releaseName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("preparing values: %w", err)
|
||||
}
|
||||
|
||||
return c.actions.upgradeAction(ctx, releaseName, chart, values, timeout)
|
||||
}
|
||||
|
||||
// loadUpgradeValues loads values for a chart required for running an upgrade.
|
||||
func (c *UpgradeClient) loadUpgradeValues(ctx context.Context, conf *config.Config, idFile clusterid.File, chart *chart.Chart,
|
||||
) (string, map[string]any, error) {
|
||||
// We need to load all values that can be statically loaded before merging them with the cluster
|
||||
// values. Otherwise the templates are not rendered correctly.
|
||||
k8sVersion, err := versions.NewValidK8sVersion(conf.KubernetesVersion, false)
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("validating k8s version: %s", conf.KubernetesVersion)
|
||||
}
|
||||
|
||||
c.log.Debugf("Checking cluster ID file")
|
||||
clusterName := clusterid.GetClusterName(conf, idFile)
|
||||
|
||||
loader := NewLoader(conf.GetProvider(), k8sVersion, clusterName)
|
||||
|
||||
var values map[string]any
|
||||
var releaseName string
|
||||
|
||||
switch chart.Metadata.Name {
|
||||
case ciliumInfo.chartName:
|
||||
releaseName = ciliumInfo.releaseName
|
||||
var ok bool
|
||||
values, ok = ciliumVals[conf.GetProvider().String()]
|
||||
if !ok {
|
||||
return "", nil, fmt.Errorf("cilium values for csp %q not found", conf.GetProvider().String())
|
||||
}
|
||||
case certManagerInfo.chartName:
|
||||
releaseName = certManagerInfo.releaseName
|
||||
values = loader.loadCertManagerValues()
|
||||
case constellationOperatorsInfo.chartName:
|
||||
releaseName = constellationOperatorsInfo.releaseName
|
||||
values = loader.loadOperatorsValues()
|
||||
|
||||
if err := c.updateCRDs(ctx, chart); err != nil {
|
||||
return "", nil, fmt.Errorf("updating CRDs: %w", err)
|
||||
}
|
||||
case constellationServicesInfo.chartName:
|
||||
releaseName = constellationServicesInfo.releaseName
|
||||
values = loader.loadConstellationServicesValues()
|
||||
|
||||
if err := c.applyMigrations(ctx, releaseName, values, conf); err != nil {
|
||||
return "", nil, fmt.Errorf("applying migrations: %w", err)
|
||||
}
|
||||
case csiInfo.chartName:
|
||||
releaseName = csiInfo.releaseName
|
||||
values = loader.loadCSIValues()
|
||||
case awsLBControllerInfo.chartName:
|
||||
releaseName = awsLBControllerInfo.releaseName
|
||||
values = loader.loadAWSLBControllerValues()
|
||||
default:
|
||||
return "", nil, fmt.Errorf("unknown chart name: %s", chart.Metadata.Name)
|
||||
}
|
||||
|
||||
return releaseName, values, nil
|
||||
}
|
||||
|
||||
// applyMigrations checks the from version and applies the necessary migrations.
|
||||
// The function assumes the caller has verified that our version drift restriction is not violated,
|
||||
// Currently, this is done during config validation.
|
||||
func (c *UpgradeClient) applyMigrations(ctx context.Context, releaseName string, values map[string]any, conf *config.Config) error {
|
||||
current, err := c.currentVersion(releaseName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting %s version: %w", releaseName, err)
|
||||
}
|
||||
|
||||
if current.Major() == 2 && current.Minor() == 8 {
|
||||
// Rename/change the following function to implement any necessary migrations.
|
||||
return migrateFrom2_8(ctx, values, conf, c.kubectl)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// migrateFrom2_8 is currently a no-op that is kept for documentation purposes.
|
||||
// If you have to implement the function please make sure to update the below comment to your situation.
|
||||
// migrateFrom2_8 applies the necessary migrations for upgrading from v2.8.x to v2.9.x.
|
||||
// migrateFrom2_8 should be applied for v2.8.x --> v2.9.x.
|
||||
// migrateFrom2_8 should NOT be applied for v2.8.0 --> v2.9.x.
|
||||
func migrateFrom2_8(_ context.Context, _ map[string]any, _ *config.Config, _ crdClient) error {
|
||||
return nil
|
||||
}
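// Illustrative sketch (hypothetical keys, not part of this commit): a real
// migration of this shape could, for example, move a renamed Helm value to
// its new key before the upgrade is applied.
func exampleMigrateValues(_ context.Context, values map[string]any, _ *config.Config, _ crdClient) error {
	if old, ok := values["oldKey"]; ok { // "oldKey"/"newKey" are placeholders
		values["newKey"] = old
		delete(values, "oldKey")
	}
	return nil
}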

// mergeClusterValues returns a values map as required for helm-upgrade.
// It imitates the behaviour of helm's reuse-values flag by fetching the current values from the cluster
// and merging the fetched values with the locally found values.
// This is done to ensure that new values (from upgrades of the local files) end up in the cluster.
// reuse-values does not ensure this.
func (c *UpgradeClient) mergeClusterValues(localValues map[string]any, releaseName string) (map[string]any, error) {
	// Ensure installCRDs is set for the cert-manager chart.
	if releaseName == certManagerInfo.releaseName {
		localValues["installCRDs"] = true
	}
	clusterValues, err := c.actions.getValues(releaseName)
	if err != nil {
		return nil, fmt.Errorf("getting values for %s: %w", releaseName, err)
	}

	return mergeMaps(clusterValues, localValues), nil
	return c.actions.upgradeAction(ctx, release.ReleaseName, release.Chart, release.Values, timeout)
}
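// For illustration, a minimal sketch of the kind of recursive merge mergeMaps
// performs, assuming values from the second map (the local values) take
// precedence over the first (the cluster values); the real helper may differ
// in detail.
func mergeMapsSketch(a, b map[string]any) map[string]any {
	out := make(map[string]any, len(a))
	for k, v := range a {
		out[k] = v
	}
	for k, v := range b {
		// Merge nested maps key by key; scalar conflicts resolve to b's value.
		if bv, ok := v.(map[string]any); ok {
			if av, ok := out[k].(map[string]any); ok {
				out[k] = mergeMapsSketch(av, bv)
				continue
			}
		}
		out[k] = v
	}
	return out
}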

// GetValues queries the cluster for the values of the given release.

@ -11,9 +11,7 @@ import (
	"testing"
	"time"

	"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
	"github.com/edgelesssys/constellation/v2/internal/compatibility"
	"github.com/edgelesssys/constellation/v2/internal/config"
	"github.com/edgelesssys/constellation/v2/internal/logger"
	"github.com/edgelesssys/constellation/v2/internal/semver"
	"github.com/stretchr/testify/assert"
@ -81,7 +79,8 @@ func TestUpgradeRelease(t *testing.T) {

	chart, err := loadChartsDir(helmFS, certManagerInfo.path)
	require.NoError(err)
	err = client.upgradeRelease(context.Background(), 0, config.Default(), clusterid.File{UID: "test"}, chart)

	err = client.upgradeRelease(context.Background(), 0, Release{Chart: chart})
	if tc.wantError {
		assert.Error(err)
		return

@ -23,6 +23,7 @@ go_library(
        "//internal/constants",
        "//internal/file",
        "//internal/imagefetcher",
        "//internal/kms/uri",
        "//internal/kubernetes",
        "//internal/kubernetes/kubectl",
        "//internal/versions",
@ -49,7 +50,6 @@ go_test(
    srcs = ["upgrade_test.go"],
    embed = [":kubernetes"],
    deps = [
        "//internal/attestation/measurements",
        "//internal/attestation/variant",
        "//internal/cloud/cloudprovider",
        "//internal/compatibility",

@ -42,7 +42,7 @@ func newClient(kubeconfigPath string) (kubernetes.Interface, error) {

// StableInterface is an interface to interact with stable resources.
type StableInterface interface {
	GetCurrentConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error)
	GetConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error)
	UpdateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error)
	CreateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error)
	KubernetesVersion() (string, error)
@ -61,8 +61,8 @@ type stableClient struct {
	client kubernetes.Interface
}

// GetCurrentConfigMap returns a ConfigMap given its name.
func (u *stableClient) GetCurrentConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error) {
// GetConfigMap returns a ConfigMap given its name.
func (u *stableClient) GetConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error) {
	return u.client.CoreV1().ConfigMaps(constants.ConstellationNamespace).Get(ctx, name, metav1.GetOptions{})
}

@ -8,7 +8,6 @@ package kubernetes

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
@ -29,6 +28,7 @@ import (
	"github.com/edgelesssys/constellation/v2/internal/constants"
	"github.com/edgelesssys/constellation/v2/internal/file"
	"github.com/edgelesssys/constellation/v2/internal/imagefetcher"
	"github.com/edgelesssys/constellation/v2/internal/kms/uri"
	internalk8s "github.com/edgelesssys/constellation/v2/internal/kubernetes"
	"github.com/edgelesssys/constellation/v2/internal/kubernetes/kubectl"
	"github.com/edgelesssys/constellation/v2/internal/versions"
@ -155,6 +155,19 @@ func NewUpgrader(
	return u, nil
}

// GetMeasurementSalt returns the measurementSalt from the join-config.
func (u *Upgrader) GetMeasurementSalt(ctx context.Context) ([]byte, error) {
	cm, err := u.stableInterface.GetConfigMap(ctx, constants.JoinConfigMap)
	if err != nil {
		return nil, fmt.Errorf("retrieving current join-config: %w", err)
	}
	salt, ok := cm.BinaryData[constants.MeasurementSaltFilename]
	if !ok {
		return nil, errors.New("measurementSalt missing from join-config")
	}
	return salt, nil
}
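// Illustrative sketch (hypothetical call site, names assumed): the salt read
// here lets `upgrade apply` render the constellation-services chart with the
// same measurementSalt that was generated at init time.
salt, err := upgrader.GetMeasurementSalt(ctx)
if err != nil {
	return fmt.Errorf("getting measurement salt: %w", err)
}
prepareOpts.MeasurementSalt = salt // field name assumed for illustration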

// GetUpgradeID returns the upgrade ID.
func (u *Upgrader) GetUpgradeID() string {
	return u.upgradeID
@ -183,13 +196,17 @@ func (u *Upgrader) PlanTerraformMigrations(ctx context.Context, opts upgrade.Ter
// If PlanTerraformMigrations has not been executed before, it will return an error.
// In case of a successful upgrade, the output will be written to the specified file and the old Terraform directory is replaced
// by the new one.
func (u *Upgrader) ApplyTerraformMigrations(ctx context.Context, opts upgrade.TerraformUpgradeOptions) (clusterid.File, error) {
func (u *Upgrader) ApplyTerraformMigrations(ctx context.Context, opts upgrade.TerraformUpgradeOptions) (terraform.ApplyOutput, error) {
	return u.tfUpgrader.ApplyTerraformMigrations(ctx, opts, u.upgradeID)
}

// UpgradeHelmServices upgrades the Helm services.
func (u *Upgrader) UpgradeHelmServices(ctx context.Context, config *config.Config, idFile clusterid.File, timeout time.Duration, allowDestructive bool, force bool) error {
	return u.helmClient.Upgrade(ctx, config, idFile, timeout, allowDestructive, force, u.upgradeID)
func (u *Upgrader) UpgradeHelmServices(ctx context.Context, config *config.Config, idFile clusterid.File, timeout time.Duration,
	allowDestructive bool, force bool, conformance bool, helmWaitMode helm.WaitMode, masterSecret uri.MasterSecret, serviceAccURI string,
	validK8sVersion versions.ValidK8sVersion, output terraform.ApplyOutput,
) error {
	return u.helmClient.Upgrade(ctx, config, idFile, timeout, allowDestructive, force, u.upgradeID, conformance,
		helmWaitMode, masterSecret, serviceAccURI, validK8sVersion, output)
}
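// Illustrative call (hypothetical variable names; helm.WaitModeAtomic assumed
// to be an available wait mode): the upgrade path now forwards the same inputs
// that `constellation init` uses to render its Helm releases.
err := upgrader.UpgradeHelmServices(ctx, conf, idFile, 10*time.Minute,
	false /* allowDestructive */, false /* force */, false, /* conformance */
	helm.WaitModeAtomic, masterSecret, serviceAccURI, validK8sVersion, tfOutput)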

// UpgradeNodeVersion upgrades the cluster's NodeVersion object and in turn triggers image & k8s version upgrades.
@ -293,56 +310,41 @@ func (u *Upgrader) CurrentKubernetesVersion(ctx context.Context) (string, error)
	return nodeVersion.Spec.KubernetesClusterVersion, nil
}

// UpdateAttestationConfig fetches the cluster's attestation config, compares it to a new config,
// and updates the cluster's config if it is different from the new one.
func (u *Upgrader) UpdateAttestationConfig(ctx context.Context, newAttestConfig config.AttestationCfg) error {
	currentAttestConfig, joinConfig, err := u.GetClusterAttestationConfig(ctx, newAttestConfig.GetVariant())
	if err != nil {
		return fmt.Errorf("getting attestation config: %w", err)
	}
	equal, err := newAttestConfig.EqualTo(currentAttestConfig)
	if err != nil {
		return fmt.Errorf("comparing attestation configs: %w", err)
	}
	if equal {
		fmt.Fprintln(u.outWriter, "Cluster is already using the chosen attestation config, skipping config upgrade")
		return nil
	}

	// Back up the previous measurements.
	joinConfig.Data[constants.AttestationConfigFilename+"_backup"] = joinConfig.Data[constants.AttestationConfigFilename]

	newConfigJSON, err := json.Marshal(newAttestConfig)
	if err != nil {
		return fmt.Errorf("marshaling attestation config: %w", err)
	}
	joinConfig.Data[constants.AttestationConfigFilename] = string(newConfigJSON)
	u.log.Debugf("Triggering attestation config update now")
	if _, err = u.stableInterface.UpdateConfigMap(ctx, joinConfig); err != nil {
		return fmt.Errorf("setting new attestation config: %w", err)
	}

	fmt.Fprintln(u.outWriter, "Successfully updated the cluster's attestation config")
	return nil
}

// GetClusterAttestationConfig fetches the join-config configmap from the cluster, extracts the config
// and returns both the full configmap and the attestation config.
func (u *Upgrader) GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, *corev1.ConfigMap, error) {
	existingConf, err := u.stableInterface.GetCurrentConfigMap(ctx, constants.JoinConfigMap)
func (u *Upgrader) GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error) {
	existingConf, err := u.stableInterface.GetConfigMap(ctx, constants.JoinConfigMap)
	if err != nil {
		return nil, nil, fmt.Errorf("retrieving current attestation config: %w", err)
		return nil, fmt.Errorf("retrieving current attestation config: %w", err)
	}
	if _, ok := existingConf.Data[constants.AttestationConfigFilename]; !ok {
		return nil, nil, errors.New("attestation config missing from join-config")
		return nil, errors.New("attestation config missing from join-config")
	}

	existingAttestationConfig, err := config.UnmarshalAttestationConfig([]byte(existingConf.Data[constants.AttestationConfigFilename]), variant)
	if err != nil {
		return nil, nil, fmt.Errorf("retrieving current attestation config: %w", err)
		return nil, fmt.Errorf("retrieving current attestation config: %w", err)
	}

	return existingAttestationConfig, existingConf, nil
	return existingAttestationConfig, nil
}

// BackupConfigMap creates a backup of the given config map.
func (u *Upgrader) BackupConfigMap(ctx context.Context, name string) error {
	cm, err := u.stableInterface.GetConfigMap(ctx, name)
	if err != nil {
		return fmt.Errorf("getting config map %s: %w", name, err)
	}
	backup := cm.DeepCopy()
	backup.ObjectMeta = metav1.ObjectMeta{}
	backup.Name = fmt.Sprintf("%s-backup", backup.Name)
	if _, err := u.stableInterface.CreateConfigMap(ctx, backup); err != nil {
		if _, err := u.stableInterface.UpdateConfigMap(ctx, backup); err != nil {
			return fmt.Errorf("updating backup config map: %w", err)
		}
	}
	u.log.Debugf("Successfully backed up config map %s", cm.Name)
	return nil
}
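// Illustrative usage (hypothetical call site): backing up the join-config
// before mutating it produces the "join-config-backup" config map that the
// troubleshooting docs below refer to.
if err := upgrader.BackupConfigMap(ctx, constants.JoinConfigMap); err != nil {
	return fmt.Errorf("backing up join-config: %w", err)
}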

// ExtendClusterConfigCertSANs extends the ClusterConfig stored under "kube-system/kubeadm-config" with the given SANs.
@ -391,7 +393,7 @@ func (u *Upgrader) ExtendClusterConfigCertSANs(ctx context.Context, alternativeN
// GetClusterConfiguration fetches the kubeadm-config configmap from the cluster, extracts the config
// and returns both the full configmap and the ClusterConfiguration.
func (u *Upgrader) GetClusterConfiguration(ctx context.Context) (kubeadmv1beta3.ClusterConfiguration, *corev1.ConfigMap, error) {
	existingConf, err := u.stableInterface.GetCurrentConfigMap(ctx, constants.KubeadmConfigMap)
	existingConf, err := u.stableInterface.GetConfigMap(ctx, constants.KubeadmConfigMap)
	if err != nil {
		return kubeadmv1beta3.ClusterConfiguration{}, nil, fmt.Errorf("retrieving current kubeadm-config: %w", err)
	}
@ -544,7 +546,7 @@ func upgradeInProgress(nodeVersion updatev1alpha1.NodeVersion) bool {
}

type helmInterface interface {
	Upgrade(ctx context.Context, config *config.Config, idFile clusterid.File, timeout time.Duration, allowDestructive, force bool, upgradeID string) error
	Upgrade(ctx context.Context, config *config.Config, idFile clusterid.File, timeout time.Duration, allowDestructive, force bool, upgradeID string, conformance bool, helmWaitMode helm.WaitMode, masterSecret uri.MasterSecret, serviceAccURI string, validK8sVersion versions.ValidK8sVersion, output terraform.ApplyOutput) error
}

type debugLog interface {

@ -8,14 +8,12 @@ package kubernetes

import (
	"context"
	"encoding/json"
	"errors"
	"io"
	"testing"

	kerrors "k8s.io/apimachinery/pkg/api/errors"

	"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
	"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
	"github.com/edgelesssys/constellation/v2/internal/compatibility"
@ -186,7 +184,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable: &fakeStableClient{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":true}}`),
				},
			},
			wantUpdate: true,
@ -337,118 +335,6 @@ func TestUpgradeNodeVersion(t *testing.T) {
	}
}

func TestUpdateMeasurements(t *testing.T) {
	someErr := errors.New("error")
	testCases := map[string]struct {
		updater    *fakeStableClient
		newConfig  config.AttestationCfg
		wantUpdate bool
		wantErr    bool
	}{
		"success": {
			updater: &fakeStableClient{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"measurements":{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}}`),
				},
			},
			newConfig: &config.GCPSEVES{
				Measurements: measurements.M{
					0: measurements.WithAllBytes(0xBB, measurements.Enforce, measurements.PCRMeasurementLength),
				},
			},
			wantUpdate: true,
		},
		"measurements are the same": {
			updater: &fakeStableClient{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"measurements":{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}}`),
				},
			},
			newConfig: &config.GCPSEVES{
				Measurements: measurements.M{
					0: measurements.WithAllBytes(0xAA, measurements.Enforce, measurements.PCRMeasurementLength),
				},
			},
		},
		"setting warnOnly to true is allowed": {
			updater: &fakeStableClient{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"measurements":{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}}`),
				},
			},
			newConfig: &config.GCPSEVES{
				Measurements: measurements.M{
					0: measurements.WithAllBytes(0xAA, measurements.WarnOnly, measurements.PCRMeasurementLength),
				},
			},
			wantUpdate: true,
		},
		"setting warnOnly to false is allowed": {
			updater: &fakeStableClient{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"measurements":{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":true}}}`),
				},
			},
			newConfig: &config.GCPSEVES{
				Measurements: measurements.M{
					0: measurements.WithAllBytes(0xAA, measurements.Enforce, measurements.PCRMeasurementLength),
				},
			},
			wantUpdate: true,
		},
		"getCurrent error": {
			updater: &fakeStableClient{getErr: someErr},
			newConfig: &config.GCPSEVES{
				Measurements: measurements.M{
					0: measurements.WithAllBytes(0xBB, measurements.Enforce, measurements.PCRMeasurementLength),
				},
			},
			wantErr: true,
		},
		"update error": {
			updater: &fakeStableClient{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"measurements":{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}}`),
				},
				updateErr: someErr,
			},
			newConfig: &config.GCPSEVES{
				Measurements: measurements.M{
					0: measurements.WithAllBytes(0xBB, measurements.Enforce, measurements.PCRMeasurementLength),
				},
			},
			wantErr: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			upgrader := &Upgrader{
				stableInterface: tc.updater,
				outWriter:       io.Discard,
				log:             logger.NewTest(t),
			}

			err := upgrader.UpdateAttestationConfig(context.Background(), tc.newConfig)
			if tc.wantErr {
				assert.Error(err)
				return
			}

			assert.NoError(err)
			if tc.wantUpdate {
				newConfigJSON, err := json.Marshal(tc.newConfig)
				require.NoError(t, err)
				assert.JSONEq(string(newConfigJSON), tc.updater.updatedConfigMaps[constants.JoinConfigMap].Data[constants.AttestationConfigFilename])
			} else {
				assert.Nil(tc.updater.updatedConfigMaps)
			}
		})
	}
}

func TestUpdateImage(t *testing.T) {
	someErr := errors.New("error")
	testCases := map[string]struct {
@ -626,7 +512,7 @@ type fakeStableClient struct {
	k8sErr error
}

func (s *fakeStableClient) GetCurrentConfigMap(_ context.Context, name string) (*corev1.ConfigMap, error) {
func (s *fakeStableClient) GetConfigMap(_ context.Context, name string) (*corev1.ConfigMap, error) {
	return s.configMaps[name], s.getErr
}

@ -12,7 +12,6 @@ go_library(
    visibility = ["//cli:__subpackages__"],
    deps = [
        "//cli/internal/cloudcmd",
        "//cli/internal/clusterid",
        "//cli/internal/terraform",
        "//internal/cloud/cloudprovider",
        "//internal/constants",

@ -15,7 +15,6 @@ import (
	"strings"

	"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
	"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
	"github.com/edgelesssys/constellation/v2/cli/internal/terraform"
	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
	"github.com/edgelesssys/constellation/v2/internal/constants"
@ -140,44 +139,30 @@ func CleanUpTerraformMigrations(upgradeWorkspace, upgradeID string, fileHandler
// If PlanTerraformMigrations has not been executed before, it will return an error.
// In case of a successful upgrade, the output will be written to the specified file and the old Terraform directory is replaced
// by the new one.
func (u *TerraformUpgrader) ApplyTerraformMigrations(ctx context.Context, opts TerraformUpgradeOptions, upgradeID string) (clusterid.File, error) {
func (u *TerraformUpgrader) ApplyTerraformMigrations(ctx context.Context, opts TerraformUpgradeOptions, upgradeID string) (terraform.ApplyOutput, error) {
	tfOutput, err := u.tf.CreateCluster(ctx, opts.CSP, opts.LogLevel)
	if err != nil {
		return clusterid.File{}, fmt.Errorf("terraform apply: %w", err)
		return tfOutput, fmt.Errorf("terraform apply: %w", err)
	}

	clusterID := clusterid.File{
		CloudProvider:     opts.CSP,
		InitSecret:        []byte(tfOutput.Secret),
		IP:                tfOutput.IP,
		APIServerCertSANs: tfOutput.APIServerCertSANs,
		UID:               tfOutput.UID,
	}

	// Patch MAA policy if we applied an Azure upgrade.
	if tfOutput.Azure != nil {
		if err := u.policyPatcher.Patch(ctx, tfOutput.Azure.AttestationURL); err != nil {
			return clusterid.File{}, fmt.Errorf("patching policies: %w", err)
			return tfOutput, fmt.Errorf("patching policies: %w", err)
		}
		clusterID.AttestationURL = tfOutput.Azure.AttestationURL
	}

	if err := u.fileHandler.RemoveAll(opts.TFWorkspace); err != nil {
		return clusterid.File{}, fmt.Errorf("removing old terraform directory: %w", err)
		return tfOutput, fmt.Errorf("removing old terraform directory: %w", err)
	}

	if err := u.fileHandler.CopyDir(
		filepath.Join(opts.UpgradeWorkspace, upgradeID, constants.TerraformUpgradeWorkingDir),
		opts.TFWorkspace,
	); err != nil {
		return clusterid.File{}, fmt.Errorf("replacing old terraform directory with new one: %w", err)
		return tfOutput, fmt.Errorf("replacing old terraform directory with new one: %w", err)
	}

	if err := u.fileHandler.RemoveAll(filepath.Join(opts.UpgradeWorkspace, upgradeID, constants.TerraformUpgradeWorkingDir)); err != nil {
		return clusterid.File{}, fmt.Errorf("removing terraform upgrade directory: %w", err)
		return tfOutput, fmt.Errorf("removing terraform upgrade directory: %w", err)
	}

	return clusterID, nil
	return tfOutput, nil
}
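// Illustrative sketch (hypothetical call site): since ApplyTerraformMigrations
// now returns the raw Terraform output, assembling the cluster ID file moves
// to the caller, mirroring the removed code above.
tfOutput, err := u.ApplyTerraformMigrations(ctx, opts, upgradeID)
if err != nil {
	return fmt.Errorf("applying migrations: %w", err)
}
idFile := clusterid.File{
	CloudProvider:     opts.CSP,
	InitSecret:        []byte(tfOutput.Secret),
	IP:                tfOutput.IP,
	APIServerCertSANs: tfOutput.APIServerCertSANs,
	UID:               tfOutput.UID,
}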

type tfClientCommon interface {

@ -472,10 +472,12 @@ constellation upgrade apply [flags]
### Options

```
  -h, --help   help for apply
  -y, --yes    run upgrades without further confirmation
                   WARNING: might delete your resources in case you are using cert-manager in your cluster. Please read the docs.
                   WARNING: might unintentionally overwrite measurements in the running cluster.
      --conformance      enable conformance mode
  -h, --help             help for apply
      --skip-helm-wait   install helm charts without waiting for deployments to be ready
  -y, --yes              run upgrades without further confirmation
                             WARNING: might delete your resources in case you are using cert-manager in your cluster. Please read the docs.
                             WARNING: might unintentionally overwrite measurements in the running cluster.
```

### Options inherited from parent commands

@ -44,6 +44,7 @@ ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate

This error indicates that a node's [attestation statement](../architecture/attestation.md) contains measurements that don't match the trusted values expected by the [JoinService](../architecture/microservices.md#joinservice).
This may for example happen if the cloud provider updates the VM's firmware such that it influences the [runtime measurements](../architecture/attestation.md#runtime-measurements) in an unforeseen way.
A failed upgrade due to an erroneous attestation config can also cause this error.
You can change the expected measurements to resolve the failure.

:::caution
@ -54,6 +55,14 @@ When in doubt, check if the encountered [issue is known](https://github.com/edge

:::

:::tip

During an upgrade with modified attestation config, a backup of the current configuration is stored in the `join-config-backup` config map in the `kube-system` namespace. To restore the old attestation config after a failed upgrade, you can copy the attestation config from this resource, put it in your configuration file and retry the upgrade.
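
For example, assuming kubectl access to the cluster, `kubectl get configmap join-config-backup -n kube-system -o yaml` shows the stored backup.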

:::

You can use the `upgrade apply` command to change measurements of a running cluster:

1. Modify the `measurements` key in your local `constellation-conf.yaml` to the expected values.