cli: Terraform migrations on upgrade (#1685)

* add terraform planning

* overwrite terraform files in upgrade workspace

* Revert "overwrite terraform files in upgrade workspace"

This reverts commit 8bdacfb8bef23ef2cdbdb06bad0855b3bbc42df0.

* prepare terraform workspace

* test upgrade integration

* print upgrade abort

* rename plan file

* write output to file

* add show plan test

* add upgrade tf workdir

* fix workspace preparing

* squash to 1 command

* test

* bazel build

* plan test

* register flag manually

* bazel tidy

* fix linter

* remove MAA variable

* fix workdir

* accept tf variables

* variable fetching

* fix resource indices

* accept Terraform targets

* refactor upgrade command

* Terraform migration apply unit test

* pass down image fetcher to test

* use new flags in e2e test

* move file name to constant

* update buildfiles

* fix version constant

* conditionally create MAA

* move interface down

* upgrade dir

* update buildfiles

* fix interface

* fix createMAA check

* fix imports

* update buildfiles

* wip: workspace backup

* copy utils

* backup upgrade workspace

* remove debug print

* replace old state after upgrade

* check if flag exists

* prepare test workspace

* remove prefix

Co-authored-by: Otto Bittner <cobittner@posteo.net>

* respect file permissions

* refactor tf upgrader

* check workspace before upgrades

* remove temp upgrade dir after completion

* clean up workspace after abortion

* fix upgrade apply test

* fix linter

---------

Co-authored-by: Otto Bittner <cobittner@posteo.net>
Moritz Sanft, 2023-05-22 13:31:20 +02:00, committed by GitHub
parent 339e750c18
commit c69e6777bd
21 changed files with 1391 additions and 44 deletions
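In short, the upgrade apply command now plans any required Terraform migrations before the service and node upgrades, prints the plan, asks for confirmation unless --yes is given, and applies the migrations; the new --tf-log flag sets the Terraform log level. An illustrative invocation, matching what the updated e2e test below runs (assuming the CLI binary is named constellation):

constellation upgrade apply --force --debug --yes --tf-log=DEBUG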

View file

@@ -23,7 +23,7 @@ type imageFetcher interface {
type terraformClient interface {
PrepareWorkspace(path string, input terraform.Variables) error
-CreateCluster(ctx context.Context, logLevel terraform.LogLevel) (terraform.CreateOutput, error)
+CreateCluster(ctx context.Context, logLevel terraform.LogLevel, targets ...string) (terraform.CreateOutput, error)
CreateIAMConfig(ctx context.Context, provider cloudprovider.Provider, logLevel terraform.LogLevel) (terraform.IAMOutput, error)
Destroy(ctx context.Context, logLevel terraform.LogLevel) error
CleanUpWorkspace() error

View file

@@ -45,7 +45,7 @@ type stubTerraformClient struct {
showErr error
}
-func (c *stubTerraformClient) CreateCluster(_ context.Context, _ terraform.LogLevel) (terraform.CreateOutput, error) {
+func (c *stubTerraformClient) CreateCluster(_ context.Context, _ terraform.LogLevel, _ ...string) (terraform.CreateOutput, error) {
return terraform.CreateOutput{
IP: c.ip,
Secret: c.initSecret,

View file

@@ -41,9 +41,11 @@ go_library(
"//cli/internal/clusterid",
"//cli/internal/helm",
"//cli/internal/iamid",
"//cli/internal/image",
"//cli/internal/kubernetes",
"//cli/internal/libvirt",
"//cli/internal/terraform",
"//cli/internal/upgrade",
"//disk-mapper/recoverproto",
"//internal/atls",
"//internal/attestation/measurements",
@@ -122,6 +124,7 @@ go_test(
"//cli/internal/iamid",
"//cli/internal/kubernetes",
"//cli/internal/terraform",
"//cli/internal/upgrade",
"//disk-mapper/recoverproto",
"//internal/atls",
"//internal/attestation/measurements",

View file

@@ -10,11 +10,16 @@ import (
"context"
"errors"
"fmt"
"path/filepath"
"strings"
"time"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/helm"
"github.com/edgelesssys/constellation/v2/cli/internal/image"
"github.com/edgelesssys/constellation/v2/cli/internal/kubernetes"
"github.com/edgelesssys/constellation/v2/cli/internal/terraform"
"github.com/edgelesssys/constellation/v2/cli/internal/upgrade"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/compatibility"
"github.com/edgelesssys/constellation/v2/internal/config"
@@ -55,17 +60,20 @@ func runUpgradeApply(cmd *cobra.Command, _ []string) error {
defer log.Sync()
fileHandler := file.NewHandler(afero.NewOsFs())
-upgrader, err := kubernetes.NewUpgrader(cmd.OutOrStdout(), log)
+upgrader, err := kubernetes.NewUpgrader(cmd.Context(), cmd.OutOrStdout(), log)
if err != nil {
return err
}
-applyCmd := upgradeApplyCmd{upgrader: upgrader, log: log}
+fetcher := image.New()
+applyCmd := upgradeApplyCmd{upgrader: upgrader, log: log, fetcher: fetcher}
return applyCmd.upgradeApply(cmd, fileHandler)
}
type upgradeApplyCmd struct {
upgrader cloudUpgrader
fetcher imageFetcher
log debugLog
}
@@ -94,6 +102,10 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, fileHandler file.Hand
return fmt.Errorf("upgrading measurements: %w", err)
}
if err := u.migrateTerraform(cmd, fileHandler, u.fetcher, conf, flags); err != nil {
return fmt.Errorf("performing Terraform migrations: %w", err)
}
if conf.GetProvider() == cloudprovider.Azure || conf.GetProvider() == cloudprovider.GCP || conf.GetProvider() == cloudprovider.AWS {
err = u.handleServiceUpgrade(cmd, conf, flags)
upgradeErr := &compatibility.InvalidUpgradeError{}
@@ -120,6 +132,141 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, fileHandler file.Hand
return nil
}
// migrateTerraform checks if the Constellation version the cluster is being upgraded to requires a migration
// of cloud resources with Terraform. If so, the migration is performed.
func (u *upgradeApplyCmd) migrateTerraform(cmd *cobra.Command, file file.Handler, fetcher imageFetcher, conf *config.Config, flags upgradeApplyFlags) error {
u.log.Debugf("Planning Terraform migrations")
if err := u.upgrader.CheckTerraformMigrations(file); err != nil {
return fmt.Errorf("checking workspace: %w", err)
}
targets, vars, err := u.parseUpgradeVars(cmd, conf, fetcher)
if err != nil {
return fmt.Errorf("parsing upgrade variables: %w", err)
}
u.log.Debugf("Using migration targets:\n%v", targets)
u.log.Debugf("Using Terraform variables:\n%v", vars)
opts := upgrade.TerraformUpgradeOptions{
LogLevel: flags.terraformLogLevel,
CSP: conf.GetProvider(),
Vars: vars,
Targets: targets,
OutputFile: constants.TerraformMigrationOutputFile,
}
// Check if there are any Terraform migrations to apply
hasDiff, err := u.upgrader.PlanTerraformMigrations(cmd.Context(), opts)
if err != nil {
return fmt.Errorf("planning terraform migrations: %w", err)
}
if hasDiff {
// If there are any Terraform migrations to apply, ask for confirmation
if !flags.yes {
ok, err := askToConfirm(cmd, "Do you want to apply the Terraform migrations?")
if err != nil {
return fmt.Errorf("asking for confirmation: %w", err)
}
if !ok {
cmd.Println("Aborting upgrade.")
if err := u.upgrader.CleanUpTerraformMigrations(file); err != nil {
return fmt.Errorf("cleaning up workspace: %w", err)
}
return fmt.Errorf("aborted by user")
}
}
u.log.Debugf("Applying Terraform migrations")
err := u.upgrader.ApplyTerraformMigrations(cmd.Context(), file, opts)
if err != nil {
return fmt.Errorf("applying terraform migrations: %w", err)
}
cmd.Printf("Terraform migrations applied successfully and output written to: %s\n"+
"A backup of the pre-upgrade Terraform state has been written to: %s\n",
opts.OutputFile, filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeBackupDir))
} else {
u.log.Debugf("No Terraform diff detected")
}
return nil
}
func (u *upgradeApplyCmd) parseUpgradeVars(cmd *cobra.Command, conf *config.Config, fetcher imageFetcher) ([]string, terraform.Variables, error) {
// Fetch variables to execute Terraform script with
imageRef, err := fetcher.FetchReference(cmd.Context(), conf)
if err != nil {
return nil, nil, fmt.Errorf("fetching image reference: %w", err)
}
commonVariables := terraform.CommonVariables{
Name: conf.Name,
StateDiskSizeGB: conf.StateDiskSizeGB,
// Ignore node counts as their values are only respected at creation
// See here: https://developer.hashicorp.com/terraform/language/meta-arguments/lifecycle#ignore_changes
}
switch conf.GetProvider() {
case cloudprovider.AWS:
targets := []string{}
vars := &terraform.AWSClusterVariables{
CommonVariables: commonVariables,
StateDiskType: conf.Provider.AWS.StateDiskType,
Region: conf.Provider.AWS.Region,
Zone: conf.Provider.AWS.Zone,
InstanceType: conf.Provider.AWS.InstanceType,
AMIImageID: imageRef,
IAMProfileControlPlane: conf.Provider.AWS.IAMProfileControlPlane,
IAMProfileWorkerNodes: conf.Provider.AWS.IAMProfileWorkerNodes,
Debug: conf.IsDebugCluster(),
}
return targets, vars, nil
case cloudprovider.Azure:
targets := []string{"azurerm_attestation_provider.attestation_provider"}
// The Azure Terraform provider is very strict about its casing
imageRef = strings.Replace(imageRef, "CommunityGalleries", "communityGalleries", 1)
imageRef = strings.Replace(imageRef, "Images", "images", 1)
imageRef = strings.Replace(imageRef, "Versions", "versions", 1)
vars := &terraform.AzureClusterVariables{
CommonVariables: commonVariables,
Location: conf.Provider.Azure.Location,
ResourceGroup: conf.Provider.Azure.ResourceGroup,
UserAssignedIdentity: conf.Provider.Azure.UserAssignedIdentity,
InstanceType: conf.Provider.Azure.InstanceType,
StateDiskType: conf.Provider.Azure.StateDiskType,
ImageID: imageRef,
SecureBoot: *conf.Provider.Azure.SecureBoot,
CreateMAA: conf.GetAttestationConfig().GetVariant().Equal(variant.AzureSEVSNP{}),
Debug: conf.IsDebugCluster(),
}
return targets, vars, nil
case cloudprovider.GCP:
targets := []string{}
vars := &terraform.GCPClusterVariables{
CommonVariables: commonVariables,
Project: conf.Provider.GCP.Project,
Region: conf.Provider.GCP.Region,
Zone: conf.Provider.GCP.Zone,
CredentialsFile: conf.Provider.GCP.ServiceAccountKeyPath,
InstanceType: conf.Provider.GCP.InstanceType,
StateDiskType: conf.Provider.GCP.StateDiskType,
ImageID: imageRef,
Debug: conf.IsDebugCluster(),
}
return targets, vars, nil
default:
return nil, nil, fmt.Errorf("unsupported provider: %s", conf.GetProvider())
}
}
type imageFetcher interface {
FetchReference(ctx context.Context, conf *config.Config) (string, error)
}
// upgradeAttestConfigIfDiff checks if the locally configured measurements are different from the cluster's measurements.
// If so the function will ask the user to confirm (if --yes is not set) and upgrade the measurements only.
func (u *upgradeApplyCmd) upgradeAttestConfigIfDiff(cmd *cobra.Command, newConfig config.AttestationCfg, flags upgradeApplyFlags) error {
@@ -193,14 +340,30 @@ func parseUpgradeApplyFlags(cmd *cobra.Command) (upgradeApplyFlags, error) {
return upgradeApplyFlags{}, fmt.Errorf("parsing force argument: %w", err)
}
-return upgradeApplyFlags{configPath: configPath, yes: yes, upgradeTimeout: timeout, force: force}, nil
+logLevelString, err := cmd.Flags().GetString("tf-log")
if err != nil {
return upgradeApplyFlags{}, fmt.Errorf("parsing tf-log string: %w", err)
}
logLevel, err := terraform.ParseLogLevel(logLevelString)
if err != nil {
return upgradeApplyFlags{}, fmt.Errorf("parsing Terraform log level %s: %w", logLevelString, err)
}
return upgradeApplyFlags{
configPath: configPath,
yes: yes,
upgradeTimeout: timeout,
force: force,
terraformLogLevel: logLevel,
}, nil
}
type upgradeApplyFlags struct {
configPath string
yes bool
upgradeTimeout time.Duration
force bool
terraformLogLevel terraform.LogLevel
}
type cloudUpgrader interface {
@@ -208,4 +371,8 @@ type cloudUpgrader interface {
UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error
UpdateAttestationConfig(ctx context.Context, newConfig config.AttestationCfg) error
GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, *corev1.ConfigMap, error)
PlanTerraformMigrations(ctx context.Context, opts upgrade.TerraformUpgradeOptions) (bool, error)
ApplyTerraformMigrations(ctx context.Context, fileHandler file.Handler, opts upgrade.TerraformUpgradeOptions) error
CheckTerraformMigrations(fileHandler file.Handler) error
CleanUpTerraformMigrations(fileHandler file.Handler) error
}

View file

@@ -7,6 +7,7 @@ SPDX-License-Identifier: AGPL-3.0-only
package cmd
import (
"bytes"
"context"
"errors"
"testing"
@@ -14,6 +15,7 @@ import (
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/kubernetes"
"github.com/edgelesssys/constellation/v2/cli/internal/upgrade"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
@@ -30,10 +32,14 @@ func TestUpgradeApply(t *testing.T) {
someErr := errors.New("some error")
testCases := map[string]struct {
upgrader stubUpgrader
fetcher stubImageFetcher
wantErr bool
yesFlag bool
stdin string
}{
"success": {
upgrader: stubUpgrader{currentConfig: config.DefaultForAzureSEVSNP()},
yesFlag: true,
},
"nodeVersion some error": {
upgrader: stubUpgrader{
@@ -41,12 +47,14 @@ func TestUpgradeApply(t *testing.T) {
nodeVersionErr: someErr,
},
wantErr: true,
yesFlag: true,
},
"nodeVersion in progress error": {
upgrader: stubUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
nodeVersionErr: kubernetes.ErrInProgress,
},
yesFlag: true,
},
"helm other error": {
upgrader: stubUpgrader{
@@ -54,6 +62,63 @@
helmErr: someErr,
},
wantErr: true,
fetcher: stubImageFetcher{},
yesFlag: true,
},
"check terraform error": {
upgrader: stubUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
checkTerraformErr: someErr,
},
fetcher: stubImageFetcher{},
wantErr: true,
yesFlag: true,
},
"abort": {
upgrader: stubUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
terraformDiff: true,
},
fetcher: stubImageFetcher{},
wantErr: true,
stdin: "no\n",
},
"clean terraform error": {
upgrader: stubUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
cleanTerraformErr: someErr,
terraformDiff: true,
},
fetcher: stubImageFetcher{},
wantErr: true,
stdin: "no\n",
},
"plan terraform error": {
upgrader: stubUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
planTerraformErr: someErr,
},
fetcher: stubImageFetcher{},
wantErr: true,
yesFlag: true,
},
"apply terraform error": {
upgrader: stubUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
applyTerraformErr: someErr,
terraformDiff: true,
},
fetcher: stubImageFetcher{},
wantErr: true,
yesFlag: true,
},
"fetch reference error": {
upgrader: stubUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
},
fetcher: stubImageFetcher{fetchReferenceErr: someErr},
wantErr: true,
yesFlag: true,
},
}
@@ -62,19 +127,23 @@
assert := assert.New(t)
require := require.New(t)
cmd := newUpgradeApplyCmd()
cmd.SetIn(bytes.NewBufferString(tc.stdin))
cmd.Flags().String("config", constants.ConfigFilename, "") // register persistent flag manually
cmd.Flags().Bool("force", true, "") // register persistent flag manually
cmd.Flags().String("tf-log", "DEBUG", "") // register persistent flag manually
-err := cmd.Flags().Set("yes", "true")
-require.NoError(err)
+if tc.yesFlag {
+err := cmd.Flags().Set("yes", "true")
+require.NoError(err)
+}
handler := file.NewHandler(afero.NewMemMapFs())
cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), cloudprovider.Azure)
require.NoError(handler.WriteYAML(constants.ConfigFilename, cfg))
require.NoError(handler.WriteJSON(constants.ClusterIDsFileName, clusterid.File{}))
-upgrader := upgradeApplyCmd{upgrader: tc.upgrader, log: logger.NewTest(t)}
+upgrader := upgradeApplyCmd{upgrader: tc.upgrader, log: logger.NewTest(t), fetcher: tc.fetcher}
-err = upgrader.upgradeApply(cmd, handler)
+err := upgrader.upgradeApply(cmd, handler)
if tc.wantErr {
assert.Error(err)
} else {
@@ -85,9 +154,14 @@
}
type stubUpgrader struct {
currentConfig config.AttestationCfg
nodeVersionErr error
helmErr error
terraformDiff bool
planTerraformErr error
checkTerraformErr error
applyTerraformErr error
cleanTerraformErr error
}
func (u stubUpgrader) UpgradeNodeVersion(_ context.Context, _ *config.Config) error {
@@ -105,3 +179,27 @@ func (u stubUpgrader) UpdateAttestationConfig(_ context.Context, _ config.Attest
func (u stubUpgrader) GetClusterAttestationConfig(_ context.Context, _ variant.Variant) (config.AttestationCfg, *corev1.ConfigMap, error) {
return u.currentConfig, &corev1.ConfigMap{}, nil
}
func (u stubUpgrader) CheckTerraformMigrations(file.Handler) error {
return u.checkTerraformErr
}
func (u stubUpgrader) CleanUpTerraformMigrations(file.Handler) error {
return u.cleanTerraformErr
}
func (u stubUpgrader) PlanTerraformMigrations(context.Context, upgrade.TerraformUpgradeOptions) (bool, error) {
return u.terraformDiff, u.planTerraformErr
}
func (u stubUpgrader) ApplyTerraformMigrations(context.Context, file.Handler, upgrade.TerraformUpgradeOptions) error {
return u.applyTerraformErr
}
type stubImageFetcher struct {
fetchReferenceErr error
}
func (s stubImageFetcher) FetchReference(context.Context, *config.Config) (string, error) {
return "", s.fetchReferenceErr
}

View file

@@ -62,7 +62,7 @@ func runUpgradeCheck(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
-checker, err := kubernetes.NewUpgrader(cmd.OutOrStdout(), log)
+checker, err := kubernetes.NewUpgrader(cmd.Context(), cmd.OutOrStdout(), log)
if err != nil {
return err
}

View file

@@ -11,14 +11,15 @@ import (
"fmt"
"path/filepath"
"github.com/edgelesssys/constellation/v2/internal/constants"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/yaml"
)
-const (
-crdBackupFolder = "constellation-upgrade/backups/crds/"
-backupFolder = "constellation-upgrade/backups/"
+var (
+backupFolder = filepath.Join(constants.UpgradeDir, "backups") + string(filepath.Separator)
+crdBackupFolder = filepath.Join(backupFolder, "crds") + string(filepath.Separator)
)
func (c *Client) backupCRDs(ctx context.Context) ([]apiextensionsv1.CustomResourceDefinition, error) {

View file

@@ -13,10 +13,13 @@ go_library(
deps = [
"//cli/internal/helm",
"//cli/internal/image",
"//cli/internal/terraform",
"//cli/internal/upgrade",
"//internal/attestation/measurements",
"//internal/compatibility",
"//internal/config",
"//internal/constants",
"//internal/file",
"//internal/kubernetes",
"//internal/kubernetes/kubectl",
"//internal/variant",

View file

@@ -12,14 +12,18 @@ import (
"errors"
"fmt"
"io"
"path/filepath"
"time"
"github.com/edgelesssys/constellation/v2/cli/internal/helm"
"github.com/edgelesssys/constellation/v2/cli/internal/image"
"github.com/edgelesssys/constellation/v2/cli/internal/terraform"
"github.com/edgelesssys/constellation/v2/cli/internal/upgrade"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/compatibility"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file"
internalk8s "github.com/edgelesssys/constellation/v2/internal/kubernetes"
"github.com/edgelesssys/constellation/v2/internal/kubernetes/kubectl"
"github.com/edgelesssys/constellation/v2/internal/variant"
@@ -77,11 +81,12 @@ type Upgrader struct {
helmClient helmInterface
imageFetcher imageFetcher
outWriter io.Writer
tfUpgrader *upgrade.TerraformUpgrader
log debugLog
}
// NewUpgrader returns a new Upgrader.
-func NewUpgrader(outWriter io.Writer, log debugLog) (*Upgrader, error) {
+func NewUpgrader(ctx context.Context, outWriter io.Writer, log debugLog) (*Upgrader, error) {
kubeConfig, err := clientcmd.BuildConfigFromFlags("", constants.AdminConfFilename)
if err != nil {
return nil, fmt.Errorf("building kubernetes config: %w", err)
@@ -103,16 +108,54 @@ func NewUpgrader(outWriter io.Writer, log debugLog) (*Upgrader, error) {
return nil, fmt.Errorf("setting up helm client: %w", err)
}
tfClient, err := terraform.New(ctx, filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeWorkingDir))
if err != nil {
return nil, fmt.Errorf("setting up terraform client: %w", err)
}
tfUpgrader, err := upgrade.NewTerraformUpgrader(tfClient, outWriter)
if err != nil {
return nil, fmt.Errorf("setting up terraform upgrader: %w", err)
}
return &Upgrader{
stableInterface: &stableClient{client: kubeClient},
dynamicInterface: &NodeVersionClient{client: unstructuredClient},
helmClient: helmClient,
imageFetcher: image.New(),
outWriter: outWriter,
tfUpgrader: tfUpgrader,
log: log,
}, nil
}
// CheckTerraformMigrations checks whether Terraform migrations are possible in the current workspace.
// If the files that will be written during the upgrade already exist, it returns an error.
func (u *Upgrader) CheckTerraformMigrations(fileHandler file.Handler) error {
return u.tfUpgrader.CheckTerraformMigrations(fileHandler)
}
// CleanUpTerraformMigrations cleans up the Terraform migration workspace, for example when an upgrade is
// aborted by the user.
func (u *Upgrader) CleanUpTerraformMigrations(fileHandler file.Handler) error {
return u.tfUpgrader.CleanUpTerraformMigrations(fileHandler)
}
// PlanTerraformMigrations prepares the upgrade workspace and plans the Terraform migrations for the Constellation upgrade.
// If a diff exists, it is written to the upgrader's output writer. It also returns
// a bool indicating whether a diff exists.
func (u *Upgrader) PlanTerraformMigrations(ctx context.Context, opts upgrade.TerraformUpgradeOptions) (bool, error) {
return u.tfUpgrader.PlanTerraformMigrations(ctx, opts)
}
// ApplyTerraformMigrations applies the migrations planned by PlanTerraformMigrations.
// If PlanTerraformMigrations has not been executed before, it will return an error.
// In case of a successful upgrade, the output will be written to the specified file and the old Terraform directory is replaced
// by the new one.
func (u *Upgrader) ApplyTerraformMigrations(ctx context.Context, fileHandler file.Handler, opts upgrade.TerraformUpgradeOptions) error {
return u.tfUpgrader.ApplyTerraformMigrations(ctx, fileHandler, opts)
}
// UpgradeHelmServices upgrade helm services.
func (u *Upgrader) UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error {
return u.helmClient.Upgrade(ctx, config, timeout, allowDestructive)

View file

@@ -10,10 +10,12 @@ import (
"bytes"
"embed"
"errors"
"fmt"
"io/fs"
"path/filepath"
"strings"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/spf13/afero"
)
@@ -27,8 +29,35 @@ var terraformFS embed.FS
// prepareWorkspace loads the embedded Terraform files,
// and writes them into the workspace.
-func prepareWorkspace(path string, fileHandler file.Handler, workingDir string) error {
-rootDir := path
+func prepareWorkspace(rootDir string, fileHandler file.Handler, workingDir string) error {
+return terraformCopier(fileHandler, rootDir, workingDir)
}
// prepareUpgradeWorkspace takes the Terraform state file from the old workspace and the
// embedded Terraform files and writes them into the new workspace.
func prepareUpgradeWorkspace(rootDir string, fileHandler file.Handler, oldWorkingDir, newWorkingDir string) error {
// backup old workspace
if err := fileHandler.CopyDir(
oldWorkingDir,
filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeBackupDir),
); err != nil {
return fmt.Errorf("backing up old workspace: %w", err)
}
// copy state file
if err := fileHandler.CopyFile(
filepath.Join(oldWorkingDir, "terraform.tfstate"),
filepath.Join(newWorkingDir, "terraform.tfstate"),
file.OptMkdirAll,
); err != nil {
return fmt.Errorf("copying state file: %w", err)
}
return terraformCopier(fileHandler, rootDir, newWorkingDir)
}
// terraformCopier copies the embedded Terraform files into the workspace.
func terraformCopier(fileHandler file.Handler, rootDir, workingDir string) error {
return fs.WalkDir(terraformFS, rootDir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err

View file

@@ -21,7 +21,7 @@ import (
"github.com/stretchr/testify/require"
)
-func TestLoader(t *testing.T) {
+func TestPrepareWorkspace(t *testing.T) {
testCases := map[string]struct {
pathBase string
provider cloudprovider.Provider
@@ -109,29 +109,114 @@ func TestLoader(t *testing.T) {
err := prepareWorkspace(path, file, constants.TerraformWorkingDir)
require.NoError(err)
-checkFiles(t, file, func(err error) { assert.NoError(err) }, tc.fileList)
+checkFiles(t, file, func(err error) { assert.NoError(err) }, constants.TerraformWorkingDir, tc.fileList)
if tc.testAlreadyUnpacked {
// Let's try the same again and check if we don't get a "file already exists" error.
require.NoError(file.Remove(filepath.Join(constants.TerraformWorkingDir, "variables.tf")))
err := prepareWorkspace(path, file, constants.TerraformWorkingDir)
assert.NoError(err)
-checkFiles(t, file, func(err error) { assert.NoError(err) }, tc.fileList)
+checkFiles(t, file, func(err error) { assert.NoError(err) }, constants.TerraformWorkingDir, tc.fileList)
}
err = cleanUpWorkspace(file, constants.TerraformWorkingDir)
require.NoError(err)
-checkFiles(t, file, func(err error) { assert.ErrorIs(err, fs.ErrNotExist) }, tc.fileList)
+checkFiles(t, file, func(err error) { assert.ErrorIs(err, fs.ErrNotExist) }, constants.TerraformWorkingDir, tc.fileList)
})
}
}
-func checkFiles(t *testing.T, file file.Handler, assertion func(error), files []string) {
+func TestPrepareUpgradeWorkspace(t *testing.T) {
testCases := map[string]struct {
pathBase string
provider cloudprovider.Provider
oldWorkingDir string
newWorkingDir string
oldWorkspaceFiles []string
newWorkspaceFiles []string
expectedFiles []string
testAlreadyUnpacked bool
wantErr bool
}{
"works": {
pathBase: "terraform",
provider: cloudprovider.AWS,
oldWorkingDir: "old",
newWorkingDir: "new",
oldWorkspaceFiles: []string{"terraform.tfstate"},
expectedFiles: []string{
"main.tf",
"variables.tf",
"outputs.tf",
"modules",
"terraform.tfstate",
},
},
"state file does not exist": {
pathBase: "terraform",
provider: cloudprovider.AWS,
oldWorkingDir: "old",
newWorkingDir: "new",
oldWorkspaceFiles: []string{},
expectedFiles: []string{},
wantErr: true,
},
"terraform files already exist in new dir": {
pathBase: "terraform",
provider: cloudprovider.AWS,
oldWorkingDir: "old",
newWorkingDir: "new",
oldWorkspaceFiles: []string{"terraform.tfstate"},
newWorkspaceFiles: []string{"main.tf"},
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
file := file.NewHandler(afero.NewMemMapFs())
path := path.Join(tc.pathBase, strings.ToLower(tc.provider.String()))
createFiles(t, file, tc.oldWorkspaceFiles, tc.oldWorkingDir)
createFiles(t, file, tc.newWorkspaceFiles, tc.newWorkingDir)
err := prepareUpgradeWorkspace(path, file, tc.oldWorkingDir, tc.newWorkingDir)
if tc.wantErr {
require.Error(err)
} else {
require.NoError(err)
}
checkFiles(t, file, func(err error) { assert.NoError(err) }, tc.newWorkingDir, tc.expectedFiles)
checkFiles(t, file, func(err error) { assert.NoError(err) },
filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeBackupDir),
tc.oldWorkspaceFiles,
)
})
}
}
func checkFiles(t *testing.T, fileHandler file.Handler, assertion func(error), dir string, files []string) {
t.Helper()
for _, f := range files {
-path := filepath.Join(constants.TerraformWorkingDir, f)
-_, err := file.Stat(path)
+path := filepath.Join(dir, f)
+_, err := fileHandler.Stat(path)
assertion(err)
}
}
func createFiles(t *testing.T, fileHandler file.Handler, fileList []string, targetDir string) {
t.Helper()
require := require.New(t)
for _, f := range fileList {
path := filepath.Join(targetDir, f)
err := fileHandler.Write(path, []byte("1234"), file.OptOverwrite, file.OptMkdirAll)
require.NoError(err)
}
}

View file

@@ -18,6 +18,7 @@ import (
"context"
"errors"
"fmt"
"io"
"path/filepath"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
@@ -78,14 +79,24 @@ func (c *Client) Show(ctx context.Context) (*tfjson.State, error) {
// PrepareWorkspace prepares a Terraform workspace for a Constellation cluster.
func (c *Client) PrepareWorkspace(path string, vars Variables) error {
if err := prepareWorkspace(path, c.file, c.workingDir); err != nil {
-return err
+return fmt.Errorf("prepare workspace: %w", err)
}
return c.writeVars(vars)
}
// PrepareUpgradeWorkspace prepares a Terraform workspace for a Constellation version upgrade.
// It copies the Terraform state from the old working dir and the embedded Terraform files into the new working dir.
func (c *Client) PrepareUpgradeWorkspace(path, oldWorkingDir, newWorkingDir string, vars Variables) error {
if err := prepareUpgradeWorkspace(path, c.file, oldWorkingDir, newWorkingDir); err != nil {
return fmt.Errorf("prepare upgrade workspace: %w", err)
} }
return c.writeVars(vars)
}
// CreateCluster creates a Constellation cluster using Terraform.
-func (c *Client) CreateCluster(ctx context.Context, logLevel LogLevel) (CreateOutput, error) {
+func (c *Client) CreateCluster(ctx context.Context, logLevel LogLevel, targets ...string) (CreateOutput, error) {
if err := c.setLogLevel(logLevel); err != nil {
return CreateOutput{}, fmt.Errorf("set terraform log level %s: %w", logLevel.String(), err)
}
@@ -94,7 +105,12 @@ func (c *Client) CreateCluster(ctx context.Context, logLevel LogLevel) (CreateOu
return CreateOutput{}, fmt.Errorf("terraform init: %w", err)
}
-if err := c.tf.Apply(ctx); err != nil {
+opts := []tfexec.ApplyOption{}
for _, target := range targets {
opts = append(opts, tfexec.Target(target))
}
if err := c.tf.Apply(ctx, opts...); err != nil {
return CreateOutput{}, fmt.Errorf("terraform apply: %w", err) return CreateOutput{}, fmt.Errorf("terraform apply: %w", err)
} }
@ -294,6 +310,45 @@ func (c *Client) CreateIAMConfig(ctx context.Context, provider cloudprovider.Pro
} }
} }
// Plan determines the diff that will be applied by Terraform. The plan output is written to the planFile.
// If there is a diff, the returned bool is true. Otherwise, it is false.
func (c *Client) Plan(ctx context.Context, logLevel LogLevel, planFile string, targets ...string) (bool, error) {
if err := c.setLogLevel(logLevel); err != nil {
return false, fmt.Errorf("set terraform log level %s: %w", logLevel.String(), err)
}
if err := c.tf.Init(ctx); err != nil {
return false, fmt.Errorf("terraform init: %w", err)
}
opts := []tfexec.PlanOption{
tfexec.Out(planFile),
}
for _, target := range targets {
opts = append(opts, tfexec.Target(target))
}
return c.tf.Plan(ctx, opts...)
}
// ShowPlan formats the diff in planFilePath and writes it to the specified output.
func (c *Client) ShowPlan(ctx context.Context, logLevel LogLevel, planFilePath string, output io.Writer) error {
if err := c.setLogLevel(logLevel); err != nil {
return fmt.Errorf("set terraform log level %s: %w", logLevel.String(), err)
}
planResult, err := c.tf.ShowPlanFileRaw(ctx, planFilePath)
if err != nil {
return fmt.Errorf("terraform show plan: %w", err)
}
_, err = output.Write([]byte(planResult))
if err != nil {
return fmt.Errorf("write plan output: %w", err)
}
return nil
}
// Destroy destroys Terraform-created cloud resources.
func (c *Client) Destroy(ctx context.Context, logLevel LogLevel) error {
if err := c.setLogLevel(logLevel); err != nil {
@@ -386,6 +441,8 @@ type tfInterface interface {
Destroy(context.Context, ...tfexec.DestroyOption) error
Init(context.Context, ...tfexec.InitOption) error
Show(context.Context, ...tfexec.ShowOption) (*tfjson.State, error)
Plan(ctx context.Context, opts ...tfexec.PlanOption) (bool, error)
ShowPlanFileRaw(ctx context.Context, planPath string, opts ...tfexec.ShowOption) (string, error)
SetLog(level string) error
SetLogPath(path string) error
}

View file

@@ -7,6 +7,7 @@ SPDX-License-Identifier: AGPL-3.0-only
package terraform
import (
"bytes"
"context"
"errors"
"io/fs"
@@ -934,14 +935,143 @@ func TestLogLevelString(t *testing.T) {
}
}
func TestPlan(t *testing.T) {
someError := errors.New("some error")
testCases := map[string]struct {
pathBase string
tf *stubTerraform
fs afero.Fs
wantErr bool
}{
"plan succeeds": {
pathBase: "terraform",
tf: &stubTerraform{},
fs: afero.NewMemMapFs(),
},
"set log path fails": {
pathBase: "terraform",
tf: &stubTerraform{
setLogPathErr: someError,
},
fs: afero.NewMemMapFs(),
wantErr: true,
},
"set log fails": {
pathBase: "terraform",
tf: &stubTerraform{
setLogErr: someError,
},
fs: afero.NewMemMapFs(),
wantErr: true,
},
"plan fails": {
pathBase: "terraform",
tf: &stubTerraform{
planJSONErr: someError,
},
fs: afero.NewMemMapFs(),
wantErr: true,
},
"init fails": {
pathBase: "terraform",
tf: &stubTerraform{
initErr: someError,
},
fs: afero.NewMemMapFs(),
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
require := require.New(t)
c := &Client{
file: file.NewHandler(tc.fs),
tf: tc.tf,
workingDir: tc.pathBase,
}
_, err := c.Plan(context.Background(), LogLevelDebug, constants.TerraformUpgradePlanFile)
if tc.wantErr {
require.Error(err)
} else {
require.NoError(err)
}
})
}
}
func TestShowPlan(t *testing.T) {
someError := errors.New("some error")
testCases := map[string]struct {
pathBase string
tf *stubTerraform
fs afero.Fs
wantErr bool
}{
"show plan succeeds": {
pathBase: "terraform",
tf: &stubTerraform{},
fs: afero.NewMemMapFs(),
},
"set log path fails": {
pathBase: "terraform",
tf: &stubTerraform{
setLogPathErr: someError,
},
fs: afero.NewMemMapFs(),
wantErr: true,
},
"set log fails": {
pathBase: "terraform",
tf: &stubTerraform{
setLogErr: someError,
},
fs: afero.NewMemMapFs(),
wantErr: true,
},
"show plan file fails": {
pathBase: "terraform",
tf: &stubTerraform{
showPlanFileErr: someError,
},
fs: afero.NewMemMapFs(),
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
require := require.New(t)
c := &Client{
file: file.NewHandler(tc.fs),
tf: tc.tf,
workingDir: tc.pathBase,
}
err := c.ShowPlan(context.Background(), LogLevelDebug, "", bytes.NewBuffer(nil))
if tc.wantErr {
require.Error(err)
} else {
require.NoError(err)
}
})
}
}
type stubTerraform struct {
applyErr error
destroyErr error
initErr error
showErr error
setLogErr error
setLogPathErr error
planJSONErr error
showPlanFileErr error
showState *tfjson.State
}
func (s *stubTerraform) Apply(context.Context, ...tfexec.ApplyOption) error {
@@ -960,6 +1090,14 @@ func (s *stubTerraform) Show(context.Context, ...tfexec.ShowOption) (*tfjson.Sta
return s.showState, s.showErr
}
func (s *stubTerraform) Plan(context.Context, ...tfexec.PlanOption) (bool, error) {
return false, s.planJSONErr
}
func (s *stubTerraform) ShowPlanFileRaw(context.Context, string, ...tfexec.ShowOption) (string, error) {
return "", s.showPlanFileErr
}
func (s *stubTerraform) SetLog(_ string) error {
return s.setLogErr
}

View file

@@ -0,0 +1,34 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("//bazel/go:go_test.bzl", "go_test")
go_library(
name = "upgrade",
srcs = [
"terraform.go",
"upgrade.go",
],
importpath = "github.com/edgelesssys/constellation/v2/cli/internal/upgrade",
visibility = ["//cli:__subpackages__"],
deps = [
"//cli/internal/clusterid",
"//cli/internal/terraform",
"//internal/cloud/cloudprovider",
"//internal/constants",
"//internal/file",
],
)
go_test(
name = "upgrade_test",
srcs = ["terraform_test.go"],
embed = [":upgrade"],
deps = [
"//cli/internal/terraform",
"//internal/cloud/cloudprovider",
"//internal/constants",
"//internal/file",
"@com_github_spf13_afero//:afero",
"@com_github_stretchr_testify//assert",
"@com_github_stretchr_testify//require",
],
)

View file

@@ -0,0 +1,175 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package upgrade
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/terraform"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file"
)
// NewTerraformUpgrader returns a new TerraformUpgrader.
func NewTerraformUpgrader(tfClient tfClient, outWriter io.Writer) (*TerraformUpgrader, error) {
return &TerraformUpgrader{
tf: tfClient,
outWriter: outWriter,
}, nil
}
// TerraformUpgrader is responsible for performing Terraform migrations on cluster upgrades.
type TerraformUpgrader struct {
tf tfClient
outWriter io.Writer
}
// TerraformUpgradeOptions are the options used for the Terraform upgrade.
type TerraformUpgradeOptions struct {
// LogLevel is the log level used for Terraform.
LogLevel terraform.LogLevel
// CSP is the cloud provider to perform the upgrade on.
CSP cloudprovider.Provider
// Vars are the Terraform variables used for the upgrade.
Vars terraform.Variables
// Targets are the Terraform targets used for the upgrade.
Targets []string
// OutputFile is the file to write the Terraform output to.
OutputFile string
}
// CheckTerraformMigrations checks whether Terraform migrations are possible in the current workspace.
// If the files that will be written during the upgrade already exist, it returns an error.
func (u *TerraformUpgrader) CheckTerraformMigrations(fileHandler file.Handler) error {
var existingFiles []string
filesToCheck := []string{
constants.TerraformMigrationOutputFile,
filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeBackupDir),
}
for _, f := range filesToCheck {
if err := checkFileExists(fileHandler, &existingFiles, f); err != nil {
return fmt.Errorf("checking terraform migrations: %w", err)
}
}
if len(existingFiles) > 0 {
return fmt.Errorf("file(s) %s already exist", strings.Join(existingFiles, ", "))
}
return nil
}
// checkFileExists checks whether a file exists and adds it to the existingFiles slice if it does.
func checkFileExists(fileHandler file.Handler, existingFiles *[]string, filename string) error {
_, err := fileHandler.Stat(filename)
if err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("checking %s: %w", filename, err)
}
return nil
}
*existingFiles = append(*existingFiles, filename)
return nil
}
// PlanTerraformMigrations prepares the upgrade workspace and plans the Terraform migrations for the Constellation upgrade.
// If a diff exists, it is written to the upgrader's output writer. It also returns
// a bool indicating whether a diff exists.
func (u *TerraformUpgrader) PlanTerraformMigrations(ctx context.Context, opts TerraformUpgradeOptions) (bool, error) {
err := u.tf.PrepareUpgradeWorkspace(
filepath.Join("terraform", strings.ToLower(opts.CSP.String())),
constants.TerraformWorkingDir,
filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeWorkingDir),
opts.Vars,
)
if err != nil {
return false, fmt.Errorf("preparing terraform workspace: %w", err)
}
hasDiff, err := u.tf.Plan(ctx, opts.LogLevel, constants.TerraformUpgradePlanFile, opts.Targets...)
if err != nil {
return false, fmt.Errorf("terraform plan: %w", err)
}
if hasDiff {
if err := u.tf.ShowPlan(ctx, opts.LogLevel, constants.TerraformUpgradePlanFile, u.outWriter); err != nil {
return false, fmt.Errorf("terraform show plan: %w", err)
}
}
return hasDiff, nil
}
// CleanUpTerraformMigrations cleans up the Terraform migration workspace, for example when an upgrade is
// aborted by the user.
func (u *TerraformUpgrader) CleanUpTerraformMigrations(fileHandler file.Handler) error {
cleanupFiles := []string{
filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeBackupDir),
filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeWorkingDir),
}
for _, f := range cleanupFiles {
if err := fileHandler.RemoveAll(f); err != nil {
return fmt.Errorf("cleaning up file %s: %w", f, err)
}
}
return nil
}
// ApplyTerraformMigrations applies the migrations planned by PlanTerraformMigrations.
// If PlanTerraformMigrations has not been executed before, it will return an error.
// In case of a successful upgrade, the output will be written to the specified file and the old Terraform directory is replaced
// by the new one.
func (u *TerraformUpgrader) ApplyTerraformMigrations(ctx context.Context, fileHandler file.Handler, opts TerraformUpgradeOptions) error {
tfOutput, err := u.tf.CreateCluster(ctx, opts.LogLevel, opts.Targets...)
if err != nil {
return fmt.Errorf("terraform apply: %w", err)
}
outputFileContents := clusterid.File{
CloudProvider: opts.CSP,
InitSecret: []byte(tfOutput.Secret),
IP: tfOutput.IP,
UID: tfOutput.UID,
AttestationURL: tfOutput.AttestationURL,
}
if err := fileHandler.RemoveAll(constants.TerraformWorkingDir); err != nil {
return fmt.Errorf("removing old terraform directory: %w", err)
}
if err := fileHandler.CopyDir(filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeWorkingDir), constants.TerraformWorkingDir); err != nil {
return fmt.Errorf("replacing old terraform directory with new one: %w", err)
}
if err := fileHandler.RemoveAll(filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeWorkingDir)); err != nil {
return fmt.Errorf("removing terraform upgrade directory: %w", err)
}
if err := fileHandler.WriteJSON(opts.OutputFile, outputFileContents); err != nil {
return fmt.Errorf("writing terraform output to file: %w", err)
}
return nil
}
// a tfClient performs the Terraform interactions in an upgrade.
type tfClient interface {
PrepareUpgradeWorkspace(path, oldWorkingDir, newWorkingDir string, vars terraform.Variables) error
ShowPlan(ctx context.Context, logLevel terraform.LogLevel, planFilePath string, output io.Writer) error
Plan(ctx context.Context, logLevel terraform.LogLevel, planFile string, targets ...string) (bool, error)
CreateCluster(ctx context.Context, logLevel terraform.LogLevel, targets ...string) (terraform.CreateOutput, error)
}
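For orientation, a condensed sketch of how the pieces added in this commit fit together, not part of the diff; runTerraformMigrations is a made-up wrapper, simplified from migrateTerraform in cmd/upgrade_apply.go and NewUpgrader in kubernetes/upgrade.go:

package main // illustrative wrapper only

import (
	"context"
	"os"
	"path/filepath"

	"github.com/edgelesssys/constellation/v2/cli/internal/terraform"
	"github.com/edgelesssys/constellation/v2/cli/internal/upgrade"
	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
	"github.com/edgelesssys/constellation/v2/internal/constants"
	"github.com/edgelesssys/constellation/v2/internal/file"
	"github.com/spf13/afero"
)

// runTerraformMigrations checks the workspace, plans the migrations, and applies them if a diff exists.
func runTerraformMigrations(ctx context.Context, vars terraform.Variables, targets []string) error {
	fileHandler := file.NewHandler(afero.NewOsFs())
	// Terraform client rooted in the dedicated upgrade working directory.
	tfClient, err := terraform.New(ctx, filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeWorkingDir))
	if err != nil {
		return err
	}
	tfUpgrader, err := upgrade.NewTerraformUpgrader(tfClient, os.Stdout)
	if err != nil {
		return err
	}
	opts := upgrade.TerraformUpgradeOptions{
		LogLevel:   terraform.LogLevelDebug,
		CSP:        cloudprovider.Azure,
		Vars:       vars,
		Targets:    targets,
		OutputFile: constants.TerraformMigrationOutputFile,
	}
	// Refuse to run if a previous upgrade left its output file or backup directory behind.
	if err := tfUpgrader.CheckTerraformMigrations(fileHandler); err != nil {
		return err
	}
	// PlanTerraformMigrations prints the diff to the output writer (os.Stdout here) when one exists.
	hasDiff, err := tfUpgrader.PlanTerraformMigrations(ctx, opts)
	if err != nil {
		return err
	}
	if !hasDiff {
		return nil
	}
	return tfUpgrader.ApplyTerraformMigrations(ctx, fileHandler, opts)
}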

View file

@ -0,0 +1,313 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package upgrade
import (
"bytes"
"context"
"io"
"path/filepath"
"testing"
"github.com/edgelesssys/constellation/v2/cli/internal/terraform"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestCheckTerraformMigrations(t *testing.T) {
upgrader := func() *TerraformUpgrader {
u, err := NewTerraformUpgrader(&stubTerraformClient{}, bytes.NewBuffer(nil))
require.NoError(t, err)
return u
}
workspace := func(existingFiles []string) file.Handler {
fs := afero.NewMemMapFs()
for _, f := range existingFiles {
require.NoError(t, afero.WriteFile(fs, f, []byte{}, 0o644))
}
return file.NewHandler(fs)
}
testCases := map[string]struct {
workspace file.Handler
wantErr bool
}{
"success": {
workspace: workspace(nil),
},
"migration output file already exists": {
workspace: workspace([]string{constants.TerraformMigrationOutputFile}),
wantErr: true,
},
"terraform backup dir already exists": {
workspace: workspace([]string{filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeBackupDir)}),
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
u := upgrader()
err := u.CheckTerraformMigrations(tc.workspace)
if tc.wantErr {
require.Error(t, err)
return
}
require.NoError(t, err)
})
}
}
func TestPlanTerraformMigrations(t *testing.T) {
upgrader := func(tf tfClient) *TerraformUpgrader {
u, err := NewTerraformUpgrader(tf, bytes.NewBuffer(nil))
require.NoError(t, err)
return u
}
testCases := map[string]struct {
tf tfClient
want bool
wantErr bool
}{
"success no diff": {
tf: &stubTerraformClient{},
},
"success diff": {
tf: &stubTerraformClient{
hasDiff: true,
},
want: true,
},
"prepare workspace error": {
tf: &stubTerraformClient{
prepareWorkspaceErr: assert.AnError,
},
wantErr: true,
},
"plan error": {
tf: &stubTerraformClient{
planErr: assert.AnError,
},
wantErr: true,
},
"show plan error no diff": {
tf: &stubTerraformClient{
showErr: assert.AnError,
},
},
"show plan error diff": {
tf: &stubTerraformClient{
showErr: assert.AnError,
hasDiff: true,
},
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
require := require.New(t)
u := upgrader(tc.tf)
opts := TerraformUpgradeOptions{
LogLevel: terraform.LogLevelDebug,
CSP: cloudprovider.Unknown,
Vars: &terraform.QEMUVariables{},
}
diff, err := u.PlanTerraformMigrations(context.Background(), opts)
if tc.wantErr {
require.Error(err)
} else {
require.NoError(err)
require.Equal(tc.want, diff)
}
})
}
}
func TestApplyTerraformMigrations(t *testing.T) {
upgrader := func(tf tfClient) *TerraformUpgrader {
u, err := NewTerraformUpgrader(tf, bytes.NewBuffer(nil))
require.NoError(t, err)
return u
}
fileHandler := func(existingFiles ...string) file.Handler {
fh := file.NewHandler(afero.NewMemMapFs())
require.NoError(t,
fh.Write(
filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeWorkingDir, "someFile"),
[]byte("some content"),
))
for _, f := range existingFiles {
require.NoError(t, fh.Write(f, []byte("some content")))
}
return fh
}
testCases := map[string]struct {
tf tfClient
fs file.Handler
outputFileName string
wantErr bool
}{
"success": {
tf: &stubTerraformClient{},
fs: fileHandler(),
outputFileName: "test.json",
},
"create cluster error": {
tf: &stubTerraformClient{
CreateClusterErr: assert.AnError,
},
fs: fileHandler(),
outputFileName: "test.json",
wantErr: true,
},
"empty file name": {
tf: &stubTerraformClient{},
fs: fileHandler(),
outputFileName: "",
wantErr: true,
},
"file already exists": {
tf: &stubTerraformClient{},
fs: fileHandler("test.json"),
outputFileName: "test.json",
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
require := require.New(t)
u := upgrader(tc.tf)
opts := TerraformUpgradeOptions{
LogLevel: terraform.LogLevelDebug,
CSP: cloudprovider.Unknown,
Vars: &terraform.QEMUVariables{},
OutputFile: tc.outputFileName,
}
err := u.ApplyTerraformMigrations(context.Background(), tc.fs, opts)
if tc.wantErr {
require.Error(err)
} else {
require.NoError(err)
}
})
}
}
func TestCleanUpTerraformMigrations(t *testing.T) {
upgrader := func() *TerraformUpgrader {
u, err := NewTerraformUpgrader(&stubTerraformClient{}, bytes.NewBuffer(nil))
require.NoError(t, err)
return u
}
workspace := func(existingFiles []string) file.Handler {
fs := afero.NewMemMapFs()
for _, f := range existingFiles {
require.NoError(t, afero.WriteFile(fs, f, []byte{}, 0o644))
}
return file.NewHandler(fs)
}
testCases := map[string]struct {
workspace file.Handler
wantFiles []string
wantErr bool
}{
"no files": {
workspace: workspace(nil),
wantFiles: []string{},
},
"clean backup dir": {
workspace: workspace([]string{
filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeBackupDir),
}),
wantFiles: []string{},
},
"clean working dir": {
workspace: workspace([]string{
filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeWorkingDir),
}),
wantFiles: []string{},
},
"clean backup dir leave other files": {
workspace: workspace([]string{
filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeBackupDir),
filepath.Join(constants.UpgradeDir, "someFile"),
}),
wantFiles: []string{
filepath.Join(constants.UpgradeDir, "someFile"),
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
require := require.New(t)
u := upgrader()
err := u.CleanUpTerraformMigrations(tc.workspace)
if tc.wantErr {
require.Error(err)
return
}
require.NoError(err)
for _, f := range tc.wantFiles {
_, err := tc.workspace.Stat(f)
require.NoError(err, "file %s should exist", f)
}
})
}
}
type stubTerraformClient struct {
hasDiff bool
prepareWorkspaceErr error
showErr error
planErr error
CreateClusterErr error
}
func (u *stubTerraformClient) PrepareUpgradeWorkspace(string, string, string, terraform.Variables) error {
return u.prepareWorkspaceErr
}
func (u *stubTerraformClient) ShowPlan(context.Context, terraform.LogLevel, string, io.Writer) error {
return u.showErr
}
func (u *stubTerraformClient) Plan(context.Context, terraform.LogLevel, string, ...string) (bool, error) {
return u.hasDiff, u.planErr
}
func (u *stubTerraformClient) CreateCluster(context.Context, terraform.LogLevel, ...string) (terraform.CreateOutput, error) {
return terraform.CreateOutput{}, u.CreateClusterErr
}
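
For orientation, the stub's method set mirrors the client interface the TerraformUpgrader is written against; a sketch of that tfClient interface, inferred from the stub (parameter names are guesses and the production definition may differ):

// Inferred for illustration from stubTerraformClient's method set; not the commit's actual definition.
type tfClient interface {
	PrepareUpgradeWorkspace(embeddedPath, oldWorkingDir, newWorkingDir string, vars terraform.Variables) error
	Plan(ctx context.Context, logLevel terraform.LogLevel, planFile string, targets ...string) (bool, error)
	ShowPlan(ctx context.Context, logLevel terraform.LogLevel, planFilePath string, output io.Writer) error
	CreateCluster(ctx context.Context, logLevel terraform.LogLevel, targets ...string) (terraform.CreateOutput, error)
}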

View file

@ -0,0 +1,10 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
/*
Package upgrade provides functionality to upgrade the cluster and its resources.
*/
package upgrade

View file

@ -52,7 +52,7 @@ var (
// setup checks that the prerequisites for the test are met:
// - a workspace is set
// - a CLI path is set
-// - the constellation-upgrade folder does not exist.
+// - the upgrade folder does not exist.
func setup() error {
	workingDir, err := workingDir(*workspace)
	if err != nil {
@ -66,8 +66,8 @@ func setup() error {
	if _, err := getCLIPath(*cliPath); err != nil {
		return fmt.Errorf("getting CLI path: %w", err)
	}
-	if _, err := os.Stat("constellation-upgrade"); err == nil {
-		return errors.New("please remove the existing constellation-upgrade folder")
+	if _, err := os.Stat(constants.UpgradeDir); err == nil {
+		return fmt.Errorf("please remove the existing %s folder", constants.UpgradeDir)
	}
	return nil
@ -107,7 +107,16 @@ func TestUpgrade(t *testing.T) {
	log.Println(string(data))
	log.Println("Triggering upgrade.")
-	cmd = exec.CommandContext(context.Background(), cli, "upgrade", "apply", "--force", "--debug", "-y")
+	tfLogFlag := ""
+	cmd = exec.CommandContext(context.Background(), cli, "--help")
+	msg, err = cmd.CombinedOutput()
+	require.NoErrorf(err, "%s", string(msg))
+	if strings.Contains(string(msg), "--tf-log") {
+		tfLogFlag = "--tf-log=DEBUG"
+	}
+	cmd = exec.CommandContext(context.Background(), cli, "upgrade", "apply", "--force", "--debug", "--yes", tfLogFlag)
	msg, err = cmd.CombinedOutput()
	require.NoErrorf(err, "%s", string(msg))
	require.NoError(containsUnexepectedMsg(string(msg)))
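
The probe above keeps the e2e test compatible with CLI binaries that predate the --tf-log flag. The same check could be factored into a small helper; a minimal sketch (cliSupportsFlag is a hypothetical name, not part of this change):

// cliSupportsFlag reports whether the CLI at cliPath lists the given flag in its
// --help output. Hypothetical helper for illustration; not part of this commit.
func cliSupportsFlag(ctx context.Context, cliPath, flag string) (bool, error) {
	out, err := exec.CommandContext(ctx, cliPath, "--help").CombinedOutput()
	if err != nil {
		return false, fmt.Errorf("running %q --help: %w: %s", cliPath, err, out)
	}
	return strings.Contains(string(out), flag), nil
}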

View file

@ -148,6 +148,16 @@ const (
	MiniConstellationUID = "mini"
	// TerraformLogFile is the file name of the Terraform log file.
	TerraformLogFile = "terraform.log"
+	// TerraformUpgradePlanFile is the file name of the zipfile created by Terraform plan for Constellation upgrades.
+	TerraformUpgradePlanFile = "plan.zip"
+	// TerraformUpgradeWorkingDir is the directory name for the Terraform workspace being used in an upgrade.
+	TerraformUpgradeWorkingDir = "terraform"
+	// TerraformUpgradeBackupDir is the directory name being used to backup the pre-upgrade state in an upgrade.
+	TerraformUpgradeBackupDir = "terraform-backup"
+	// TerraformMigrationOutputFile is the file name of the output file created by a successful Terraform migration.
+	TerraformMigrationOutputFile = "terraform-migration-output.json"
+	// UpgradeDir is the name of the directory being used for cluster upgrades.
+	UpgradeDir = "constellation-upgrade"
	//
	// Kubernetes.
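
Taken together, the new constants describe the layout of the upgrade workspace. A minimal, illustrative sketch of how they compose into paths (the exact call sites in the CLI may differ):

// Illustrative only: how the new constants compose into upgrade workspace paths.
package main

import (
	"fmt"
	"path/filepath"

	"github.com/edgelesssys/constellation/v2/internal/constants"
)

func main() {
	// constellation-upgrade/terraform: working copy of the Terraform files during an upgrade.
	workingDir := filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeWorkingDir)
	// constellation-upgrade/terraform-backup: backup of the pre-upgrade Terraform workspace.
	backupDir := filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeBackupDir)
	fmt.Println(workingDir, backupDir)
}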

View file

@ -14,10 +14,13 @@ import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt"
"io" "io"
"io/fs" "io/fs"
"os" "os"
"path" "path"
"path/filepath"
"strings"
"github.com/siderolabs/talos/pkg/machinery/config/encoder" "github.com/siderolabs/talos/pkg/machinery/config/encoder"
"github.com/spf13/afero" "github.com/spf13/afero"
@ -175,3 +178,50 @@ func (h *Handler) Stat(name string) (fs.FileInfo, error) {
func (h *Handler) MkdirAll(name string) error { func (h *Handler) MkdirAll(name string) error {
return h.fs.MkdirAll(name, 0o700) return h.fs.MkdirAll(name, 0o700)
} }
// CopyDir copies the src directory recursively into dst with the given options. OptMkdirAll
// is always set. CopyDir does not follow symlinks.
func (h *Handler) CopyDir(src, dst string, opts ...Option) error {
opts = append(opts, OptMkdirAll)
root := filepath.Join(src, string(filepath.Separator))
walkFunc := func(path string, info fs.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
pathWithoutRoot := strings.TrimPrefix(path, root)
return h.CopyFile(path, filepath.Join(dst, pathWithoutRoot), opts...)
}
return h.fs.Walk(src, walkFunc)
}
// CopyFile copies the file from src to dst with the given options, respecting file permissions.
func (h *Handler) CopyFile(src, dst string, opts ...Option) error {
srcInfo, err := h.fs.Stat(src)
if err != nil {
return fmt.Errorf("stat source file: %w", err)
}
content, err := h.fs.ReadFile(src)
if err != nil {
return fmt.Errorf("read source file: %w", err)
}
err = h.Write(dst, content, opts...)
if err != nil {
return fmt.Errorf("write destination file: %w", err)
}
err = h.fs.Chmod(dst, srcInfo.Mode())
if err != nil {
return fmt.Errorf("chmod destination file: %w", err)
}
return nil
}
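
Note that CopyDir always appends OptMkdirAll, so nested destination directories are created as needed. A short usage sketch from a caller's perspective, e.g. backing up the Terraform working directory before an upgrade overwrites it (function name, error wrapping, and imports of file, constants, path/filepath, and fmt are assumptions, not the CLI's actual implementation):

// backupTerraformWorkspace is an illustrative sketch of using the new CopyDir helper.
func backupTerraformWorkspace(fh file.Handler) error {
	src := filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeWorkingDir)
	dst := filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeBackupDir)
	if err := fh.CopyDir(src, dst); err != nil {
		return fmt.Errorf("backing up Terraform workspace: %w", err)
	}
	return nil
}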

View file

@ -8,6 +8,8 @@ package file
import (
	"encoding/json"
+	"io/fs"
+	"path/filepath"
	"testing"

	"github.com/edgelesssys/constellation/v2/internal/constants"
@ -350,3 +352,123 @@ func TestRemove(t *testing.T) {
	assert.Error(handler.Remove("d"))
}
func TestCopyFile(t *testing.T) {
perms := fs.FileMode(0o644)
setupFs := func(existingFiles ...string) afero.Fs {
fs := afero.NewMemMapFs()
aferoHelper := afero.Afero{Fs: fs}
for _, file := range existingFiles {
require.NoError(t, aferoHelper.WriteFile(file, []byte{}, perms))
}
return fs
}
testCases := map[string]struct {
fs afero.Fs
copyFiles [][]string
checkFiles []string
opts []Option
wantErr bool
}{
"successful copy": {
fs: setupFs("a"),
copyFiles: [][]string{{"a", "b"}},
checkFiles: []string{"b"},
},
"copy to existing file overwrite": {
fs: setupFs("a", "b"),
copyFiles: [][]string{{"a", "b"}},
checkFiles: []string{"b"},
opts: []Option{OptOverwrite},
},
"copy to existing file no overwrite": {
fs: setupFs("a", "b"),
copyFiles: [][]string{{"a", "b"}},
checkFiles: []string{"b"},
wantErr: true,
},
"file doesn't exist": {
fs: setupFs("a"),
copyFiles: [][]string{{"b", "c"}},
checkFiles: []string{"a"},
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
handler := NewHandler(tc.fs)
for _, files := range tc.copyFiles {
err := handler.CopyFile(files[0], files[1], tc.opts...)
if tc.wantErr {
assert.Error(err)
} else {
assert.NoError(err)
}
}
for _, file := range tc.checkFiles {
info, err := handler.fs.Stat(file)
require.NoError(err)
assert.Equal(perms, info.Mode())
}
})
}
}
func TestCopyDir(t *testing.T) {
setupHandler := func(existingFiles ...string) Handler {
fs := afero.NewMemMapFs()
handler := NewHandler(fs)
for _, file := range existingFiles {
err := handler.Write(file, []byte("some content"), OptMkdirAll)
require.NoError(t, err)
}
return handler
}
testCases := map[string]struct {
handler Handler
copyFiles [][]string
checkFiles []string
opts []Option
}{
"successful copy": {
handler: setupHandler(filepath.Join("someDir", "someFile"), filepath.Join("someDir", "someOtherDir", "someOtherFile")),
copyFiles: [][]string{{"someDir", "copiedDir"}},
checkFiles: []string{filepath.Join("copiedDir", "someFile"), filepath.Join("copiedDir", "someOtherDir", "someOtherFile")},
},
"copy file": {
handler: setupHandler("someFile"),
copyFiles: [][]string{{"someFile", "copiedFile"}},
checkFiles: []string{"copiedFile"},
},
"copy to existing dir overwrite": {
handler: setupHandler(filepath.Join("someDir", "someFile"), filepath.Join("someDir", "someOtherDir", "someOtherFile"), filepath.Join("copiedDir", "someExistingFile")),
copyFiles: [][]string{{"someDir", "copiedDir"}},
checkFiles: []string{filepath.Join("copiedDir", "someFile"), filepath.Join("copiedDir", "someOtherDir", "someOtherFile"), filepath.Join("copiedDir", "someExistingFile")},
opts: []Option{OptOverwrite},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
require := require.New(t)
for _, files := range tc.copyFiles {
err := tc.handler.CopyDir(files[0], files[1], tc.opts...)
require.NoError(err)
}
for _, file := range tc.checkFiles {
_, err := tc.handler.fs.Stat(file)
require.NoError(err)
}
})
}
}