Mirror of https://github.com/edgelesssys/constellation.git (synced 2024-10-01 01:36:09 -04:00)

cli: unify cloudcmd create and upgrade code (#2513)

* Unify cloudcmd create and upgrade code
* Make libvirt runner code a bit more idempotent

Signed-off-by: Daniel Weiße <dw@edgeless.systems>

Commit 625dc26644 (parent e8cf0f59bd)
cli/internal/cloudcmd/BUILD.bazel:

@@ -4,16 +4,15 @@ load("//bazel/go:go_test.bzl", "go_test")
 go_library(
     name = "cloudcmd",
     srcs = [
+        "apply.go",
         "clients.go",
         "cloudcmd.go",
-        "clusterupgrade.go",
-        "create.go",
         "iam.go",
         "iamupgrade.go",
         "rollback.go",
         "serviceaccount.go",
         "terminate.go",
-        "tfupgrade.go",
+        "tfplan.go",
         "tfvars.go",
         "validators.go",
     ],
@@ -44,13 +43,13 @@ go_library(
 go_test(
     name = "cloudcmd_test",
     srcs = [
+        "apply_test.go",
         "clients_test.go",
-        "clusterupgrade_test.go",
-        "create_test.go",
         "iam_test.go",
         "rollback_test.go",
         "terminate_test.go",
-        "tfupgrade_test.go",
+        "tfplan_test.go",
+        "tfvars_test.go",
         "validators_test.go",
     ],
     embed = [":cloudcmd"],
cli/internal/cloudcmd/apply.go (new file, 154 lines):

/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package cloudcmd

import (
    "context"
    "fmt"
    "io"
    "path/filepath"
    "strings"

    "github.com/edgelesssys/constellation/v2/cli/internal/libvirt"
    "github.com/edgelesssys/constellation/v2/cli/internal/state"
    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
    "github.com/edgelesssys/constellation/v2/internal/config"
    "github.com/edgelesssys/constellation/v2/internal/constants"
    "github.com/edgelesssys/constellation/v2/internal/file"
    "github.com/edgelesssys/constellation/v2/internal/imagefetcher"
    "github.com/edgelesssys/constellation/v2/internal/maa"
)

const (
    // WithRollbackOnError indicates a rollback should be performed on error.
    WithRollbackOnError RollbackBehavior = true
    // WithoutRollbackOnError indicates a rollback should not be performed on error.
    WithoutRollbackOnError RollbackBehavior = false
)

// RollbackBehavior is a boolean flag that indicates whether a rollback should be performed.
type RollbackBehavior bool

// Applier creates or updates cloud resources.
type Applier struct {
    fileHandler     file.Handler
    imageFetcher    imageFetcher
    libvirtRunner   libvirtRunner
    rawDownloader   rawDownloader
    policyPatcher   policyPatcher
    terraformClient tfResourceClient
    logLevel        terraform.LogLevel

    workingDir string
    backupDir  string
    out        io.Writer
}

// NewApplier creates a new Applier.
func NewApplier(
    ctx context.Context, out io.Writer, workingDir, backupDir string,
    logLevel terraform.LogLevel, fileHandler file.Handler,
) (*Applier, func(), error) {
    tfClient, err := terraform.New(ctx, workingDir)
    if err != nil {
        return nil, nil, fmt.Errorf("setting up terraform client: %w", err)
    }

    return &Applier{
        fileHandler:     fileHandler,
        imageFetcher:    imagefetcher.New(),
        libvirtRunner:   libvirt.New(),
        rawDownloader:   imagefetcher.NewDownloader(),
        policyPatcher:   maa.NewAzurePolicyPatcher(),
        terraformClient: tfClient,
        logLevel:        logLevel,
        workingDir:      workingDir,
        backupDir:       backupDir,
        out:             out,
    }, tfClient.RemoveInstaller, nil
}

// Plan plans the given configuration and prepares the Terraform workspace.
func (a *Applier) Plan(ctx context.Context, conf *config.Config) (bool, error) {
    vars, err := a.terraformApplyVars(ctx, conf)
    if err != nil {
        return false, fmt.Errorf("creating terraform variables: %w", err)
    }

    return plan(
        ctx, a.terraformClient, a.fileHandler, a.out, a.logLevel, vars,
        filepath.Join(constants.TerraformEmbeddedDir, strings.ToLower(conf.GetProvider().String())),
        a.workingDir,
        filepath.Join(a.backupDir, constants.TerraformUpgradeBackupDir),
    )
}

// Apply applies the prepared configuration by creating or updating cloud resources.
func (a *Applier) Apply(ctx context.Context, csp cloudprovider.Provider, withRollback RollbackBehavior) (infra state.Infrastructure, retErr error) {
    if withRollback {
        var rollbacker rollbacker
        switch csp {
        case cloudprovider.QEMU:
            rollbacker = &rollbackerQEMU{client: a.terraformClient, libvirt: a.libvirtRunner}
        default:
            rollbacker = &rollbackerTerraform{client: a.terraformClient}
        }
        defer rollbackOnError(a.out, &retErr, rollbacker, a.logLevel)
    }

    infraState, err := a.terraformClient.ApplyCluster(ctx, csp, a.logLevel)
    if err != nil {
        return infraState, fmt.Errorf("terraform apply: %w", err)
    }
    if csp == cloudprovider.Azure && infraState.Azure != nil {
        if err := a.policyPatcher.Patch(ctx, infraState.Azure.AttestationURL); err != nil {
            return infraState, fmt.Errorf("patching policies: %w", err)
        }
    }

    return infraState, nil
}

// RestoreWorkspace rolls back the existing workspace to the backup directory created when planning an action,
// and the user decides to not apply it.
// Note that this will not apply the restored state from the backup.
func (a *Applier) RestoreWorkspace() error {
    return restoreBackup(a.fileHandler, a.workingDir, filepath.Join(a.backupDir, constants.TerraformUpgradeBackupDir))
}

func (a *Applier) terraformApplyVars(ctx context.Context, conf *config.Config) (terraform.Variables, error) {
    imageRef, err := a.imageFetcher.FetchReference(
        ctx,
        conf.GetProvider(),
        conf.GetAttestationConfig().GetVariant(),
        conf.Image, conf.GetRegion(),
    )
    if err != nil {
        return nil, fmt.Errorf("fetching image reference: %w", err)
    }

    switch conf.GetProvider() {
    case cloudprovider.AWS:
        return awsTerraformVars(conf, imageRef), nil
    case cloudprovider.Azure:
        return azureTerraformVars(conf, imageRef), nil
    case cloudprovider.GCP:
        return gcpTerraformVars(conf, imageRef), nil
    case cloudprovider.OpenStack:
        return openStackTerraformVars(conf, imageRef)
    case cloudprovider.QEMU:
        return qemuTerraformVars(ctx, conf, imageRef, a.libvirtRunner, a.rawDownloader)
    default:
        return nil, fmt.Errorf("unsupported provider: %s", conf.GetProvider())
    }
}

// policyPatcher interacts with the CSP (currently only applies for Azure) to update the attestation policy.
type policyPatcher interface {
    Patch(ctx context.Context, attestationURL string) error
}
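A minimal caller sketch of the unified API added in apply.go, for orientation only: the workspace and backup directory names, ctx, conf, fileHandler, and the userConfirms prompt are assumptions of this sketch, not part of the commit.

    // Hypothetical CLI flow: plan, let the user confirm, then apply with rollback on error.
    applier, removeInstaller, err := cloudcmd.NewApplier(
        ctx, os.Stdout,
        "constellation-terraform", // assumed working directory
        "constellation-upgrade",   // assumed backup directory
        terraform.LogLevelNone, fileHandler,
    )
    if err != nil {
        return err
    }
    defer removeInstaller()

    hasDiff, err := applier.Plan(ctx, conf) // prepares the workspace and writes the Terraform diff to out
    if err != nil {
        return err
    }
    if hasDiff && !userConfirms() {
        return applier.RestoreWorkspace() // discard the planned changes, keep the previous workspace
    }

    _, err = applier.Apply(ctx, conf.GetProvider(), cloudcmd.WithRollbackOnError)
    return err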
cli/internal/cloudcmd/apply_test.go (new file, 371 lines):

/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package cloudcmd

import (
    "bytes"
    "context"
    "io"
    "path/filepath"
    "runtime"
    "testing"

    "github.com/spf13/afero"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
    "github.com/edgelesssys/constellation/v2/internal/config"
    "github.com/edgelesssys/constellation/v2/internal/constants"
    "github.com/edgelesssys/constellation/v2/internal/file"
)

func TestApplier(t *testing.T) {
    t.Setenv("CONSTELLATION_OPENSTACK_DEV", "1")
    failOnNonAMD64 := (runtime.GOARCH != "amd64") || (runtime.GOOS != "linux")
    ip := "192.0.2.1"
    configWithProvider := func(provider cloudprovider.Provider) *config.Config {
        cfg := config.Default()
        cfg.RemoveProviderAndAttestationExcept(provider)
        return cfg
    }

    testCases := map[string]struct {
        tfClient              tfResourceClient
        newTfClientErr        error
        libvirt               *stubLibvirtRunner
        provider              cloudprovider.Provider
        config                *config.Config
        policyPatcher         *stubPolicyPatcher
        wantErr               bool
        wantRollback          bool // Use only together with stubClients.
        wantTerraformRollback bool // When libvirt fails, don't call into Terraform.
    }{
        "gcp": {
            tfClient: &stubTerraformClient{ip: ip},
            provider: cloudprovider.GCP,
            config:   configWithProvider(cloudprovider.GCP),
        },
        "gcp create cluster error": {
            tfClient:              &stubTerraformClient{applyClusterErr: assert.AnError},
            provider:              cloudprovider.GCP,
            config:                configWithProvider(cloudprovider.GCP),
            wantErr:               true,
            wantRollback:          true,
            wantTerraformRollback: true,
        },
        "azure": {
            tfClient:      &stubTerraformClient{ip: ip},
            provider:      cloudprovider.Azure,
            config:        configWithProvider(cloudprovider.Azure),
            policyPatcher: &stubPolicyPatcher{},
        },
        "azure trusted launch": {
            tfClient: &stubTerraformClient{ip: ip},
            provider: cloudprovider.Azure,
            config: func() *config.Config {
                cfg := config.Default()
                cfg.RemoveProviderAndAttestationExcept(cloudprovider.Azure)
                cfg.Attestation = config.AttestationConfig{
                    AzureTrustedLaunch: &config.AzureTrustedLaunch{},
                }
                return cfg
            }(),
            policyPatcher: &stubPolicyPatcher{},
        },
        "azure new policy patch error": {
            tfClient:      &stubTerraformClient{ip: ip},
            provider:      cloudprovider.Azure,
            config:        configWithProvider(cloudprovider.Azure),
            policyPatcher: &stubPolicyPatcher{assert.AnError},
            wantErr:       true,
        },
        "azure create cluster error": {
            tfClient:              &stubTerraformClient{applyClusterErr: assert.AnError},
            provider:              cloudprovider.Azure,
            config:                configWithProvider(cloudprovider.Azure),
            policyPatcher:         &stubPolicyPatcher{},
            wantErr:               true,
            wantRollback:          true,
            wantTerraformRollback: true,
        },
        "openstack": {
            tfClient: &stubTerraformClient{ip: ip},
            libvirt:  &stubLibvirtRunner{},
            provider: cloudprovider.OpenStack,
            config: func() *config.Config {
                cfg := config.Default()
                cfg.RemoveProviderAndAttestationExcept(cloudprovider.OpenStack)
                cfg.Provider.OpenStack.Cloud = "testcloud"
                return cfg
            }(),
        },
        "openstack without clouds.yaml": {
            tfClient: &stubTerraformClient{ip: ip},
            libvirt:  &stubLibvirtRunner{},
            provider: cloudprovider.OpenStack,
            config:   configWithProvider(cloudprovider.OpenStack),
            wantErr:  true,
        },
        "openstack create cluster error": {
            tfClient: &stubTerraformClient{applyClusterErr: assert.AnError},
            libvirt:  &stubLibvirtRunner{},
            provider: cloudprovider.OpenStack,
            config: func() *config.Config {
                cfg := config.Default()
                cfg.RemoveProviderAndAttestationExcept(cloudprovider.OpenStack)
                cfg.Provider.OpenStack.Cloud = "testcloud"
                return cfg
            }(),
            wantErr:               true,
            wantRollback:          true,
            wantTerraformRollback: true,
        },
        "qemu": {
            tfClient: &stubTerraformClient{ip: ip},
            libvirt:  &stubLibvirtRunner{},
            provider: cloudprovider.QEMU,
            config:   configWithProvider(cloudprovider.QEMU),
            wantErr:  failOnNonAMD64,
        },
        "qemu create cluster error": {
            tfClient:              &stubTerraformClient{applyClusterErr: assert.AnError},
            libvirt:               &stubLibvirtRunner{},
            provider:              cloudprovider.QEMU,
            config:                configWithProvider(cloudprovider.QEMU),
            wantErr:               true,
            wantRollback:          !failOnNonAMD64, // if we run on non-AMD64/linux, we don't get to a point where rollback is needed
            wantTerraformRollback: true,
        },
        "qemu start libvirt error": {
            tfClient:              &stubTerraformClient{ip: ip},
            libvirt:               &stubLibvirtRunner{startErr: assert.AnError},
            provider:              cloudprovider.QEMU,
            config:                configWithProvider(cloudprovider.QEMU),
            wantRollback:          !failOnNonAMD64,
            wantTerraformRollback: false,
            wantErr:               true,
        },
        "unknown provider": {
            tfClient: &stubTerraformClient{},
            provider: cloudprovider.Unknown,
            config: func() *config.Config {
                cfg := config.Default()
                cfg.RemoveProviderAndAttestationExcept(cloudprovider.AWS)
                cfg.Provider.AWS = nil
                return cfg
            }(),
            wantErr: true,
        },
    }

    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            assert := assert.New(t)

            applier := &Applier{
                fileHandler: file.NewHandler(afero.NewMemMapFs()),
                imageFetcher: &stubImageFetcher{
                    reference: "some-image",
                },
                terraformClient: tc.tfClient,
                libvirtRunner:   tc.libvirt,
                rawDownloader: &stubRawDownloader{
                    destination: "some-destination",
                },
                policyPatcher: tc.policyPatcher,
                logLevel:      terraform.LogLevelNone,
                workingDir:    "test",
                backupDir:     "test-backup",
                out:           &bytes.Buffer{},
            }

            diff, err := applier.Plan(context.Background(), tc.config)
            if err != nil {
                assert.True(tc.wantErr, "unexpected error: %s", err)
                return
            }
            assert.False(diff)

            idFile, err := applier.Apply(context.Background(), tc.provider, true)

            if tc.wantErr {
                assert.Error(err)
                if tc.wantRollback {
                    cl := tc.tfClient.(*stubTerraformClient)
                    if tc.wantTerraformRollback {
                        assert.True(cl.destroyCalled)
                    }
                    assert.True(cl.cleanUpWorkspaceCalled)
                    if tc.provider == cloudprovider.QEMU {
                        assert.True(tc.libvirt.stopCalled)
                    }
                }
            } else {
                assert.NoError(err)
                assert.Equal(ip, idFile.ClusterEndpoint)
            }
        })
    }
}

func TestPlan(t *testing.T) {
    setUpFilesystem := func(existingFiles []string) file.Handler {
        fs := file.NewHandler(afero.NewMemMapFs())
        require.NoError(t, fs.Write("test/terraform.tfstate", []byte{}, file.OptMkdirAll))
        for _, f := range existingFiles {
            require.NoError(t, fs.Write(f, []byte{}))
        }
        return fs
    }

    testCases := map[string]struct {
        upgradeID string
        tf        *stubTerraformClient
        fs        file.Handler
        want      bool
        wantErr   bool
    }{
        "success no diff": {
            upgradeID: "1234",
            tf:        &stubTerraformClient{},
            fs:        setUpFilesystem([]string{}),
        },
        "success diff": {
            upgradeID: "1234",
            tf: &stubTerraformClient{
                planDiff: true,
            },
            fs:   setUpFilesystem([]string{}),
            want: true,
        },
        "prepare workspace error": {
            upgradeID: "1234",
            tf: &stubTerraformClient{
                prepareWorkspaceErr: assert.AnError,
            },
            fs:      setUpFilesystem([]string{}),
            wantErr: true,
        },
        "plan error": {
            tf: &stubTerraformClient{
                planErr: assert.AnError,
            },
            fs:      setUpFilesystem([]string{}),
            wantErr: true,
        },
        "show plan error no diff": {
            upgradeID: "1234",
            tf: &stubTerraformClient{
                showPlanErr: assert.AnError,
            },
            fs: setUpFilesystem([]string{}),
        },
        "show plan error diff": {
            upgradeID: "1234",
            tf: &stubTerraformClient{
                showPlanErr: assert.AnError,
                planDiff:    true,
            },
            fs:      setUpFilesystem([]string{}),
            wantErr: true,
        },
        "workspace not clean": {
            upgradeID: "1234",
            tf:        &stubTerraformClient{},
            fs:        setUpFilesystem([]string{filepath.Join(constants.UpgradeDir, "1234", constants.TerraformUpgradeBackupDir)}),
            wantErr:   true,
        },
    }

    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            require := require.New(t)

            u := &Applier{
                terraformClient: tc.tf,
                policyPatcher:   stubPolicyPatcher{},
                fileHandler:     tc.fs,
                imageFetcher:    &stubImageFetcher{reference: "some-image"},
                rawDownloader:   &stubRawDownloader{destination: "some-destination"},
                libvirtRunner:   &stubLibvirtRunner{},
                logLevel:        terraform.LogLevelDebug,
                backupDir:       filepath.Join(constants.UpgradeDir, tc.upgradeID),
                workingDir:      "test",
                out:             io.Discard,
            }

            cfg := config.Default()
            cfg.RemoveProviderAndAttestationExcept(cloudprovider.QEMU)

            diff, err := u.Plan(context.Background(), cfg)
            if tc.wantErr {
                require.Error(err)
            } else {
                require.NoError(err)
                require.Equal(tc.want, diff)
            }
        })
    }
}

func TestApply(t *testing.T) {
    testCases := map[string]struct {
        upgradeID     string
        tf            *stubTerraformClient
        policyPatcher stubPolicyPatcher
        fs            file.Handler
        wantErr       bool
    }{
        "success": {
            upgradeID:     "1234",
            tf:            &stubTerraformClient{},
            policyPatcher: stubPolicyPatcher{},
        },
        "apply error": {
            upgradeID: "1234",
            tf: &stubTerraformClient{
                applyClusterErr: assert.AnError,
            },
            policyPatcher: stubPolicyPatcher{},
            wantErr:       true,
        },
    }

    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            assert := require.New(t)

            u := &Applier{
                terraformClient: tc.tf,
                logLevel:        terraform.LogLevelDebug,
                libvirtRunner:   &stubLibvirtRunner{},
                policyPatcher:   stubPolicyPatcher{},
                fileHandler:     tc.fs,
                backupDir:       filepath.Join(constants.UpgradeDir, tc.upgradeID),
                workingDir:      "test",
                out:             io.Discard,
            }

            _, err := u.Apply(context.Background(), cloudprovider.QEMU, WithoutRollbackOnError)
            if tc.wantErr {
                assert.Error(err)
            } else {
                assert.NoError(err)
            }
        })
    }
}

type stubPolicyPatcher struct {
    patchErr error
}

func (s stubPolicyPatcher) Patch(_ context.Context, _ string) error {
    return s.patchErr
}
Changed Terraform client interfaces (cloudcmd package):

@@ -24,39 +24,34 @@ type imageFetcher interface {
 	) (string, error)
 }
 
-type tfCommonClient interface {
+type tfDestroyer interface {
 	CleanUpWorkspace() error
 	Destroy(ctx context.Context, logLevel terraform.LogLevel) error
-	PrepareWorkspace(path string, input terraform.Variables) error
 	RemoveInstaller()
 }
 
-type tfResourceClient interface {
-	tfCommonClient
-	ApplyCluster(ctx context.Context, provider cloudprovider.Provider, logLevel terraform.LogLevel) (state.Infrastructure, error)
-	ShowInfrastructure(ctx context.Context, provider cloudprovider.Provider) (state.Infrastructure, error)
-}
-
-type tfIAMClient interface {
-	tfCommonClient
-	ApplyIAM(ctx context.Context, provider cloudprovider.Provider, logLevel terraform.LogLevel) (terraform.IAMOutput, error)
-	ShowIAM(ctx context.Context, provider cloudprovider.Provider) (terraform.IAMOutput, error)
-}
-
-type tfUpgradePlanner interface {
+type tfPlanner interface {
 	ShowPlan(ctx context.Context, logLevel terraform.LogLevel, output io.Writer) error
 	Plan(ctx context.Context, logLevel terraform.LogLevel) (bool, error)
 	PrepareWorkspace(path string, vars terraform.Variables) error
 }
 
-type tfIAMUpgradeClient interface {
-	tfUpgradePlanner
-	ApplyIAM(ctx context.Context, csp cloudprovider.Provider, logLevel terraform.LogLevel) (terraform.IAMOutput, error)
+type tfResourceClient interface {
+	tfDestroyer
+	tfPlanner
+	ApplyCluster(ctx context.Context, provider cloudprovider.Provider, logLevel terraform.LogLevel) (state.Infrastructure, error)
 }
 
-type tfClusterUpgradeClient interface {
-	tfUpgradePlanner
-	ApplyCluster(ctx context.Context, provider cloudprovider.Provider, logLevel terraform.LogLevel) (state.Infrastructure, error)
+type tfIAMClient interface {
+	tfDestroyer
+	PrepareWorkspace(path string, vars terraform.Variables) error
+	ApplyIAM(ctx context.Context, provider cloudprovider.Provider, logLevel terraform.LogLevel) (terraform.IAMOutput, error)
+	ShowIAM(ctx context.Context, provider cloudprovider.Provider) (terraform.IAMOutput, error)
+}
+
+type tfIAMUpgradeClient interface {
+	tfPlanner
+	ApplyIAM(ctx context.Context, csp cloudprovider.Provider, logLevel terraform.LogLevel) (terraform.IAMOutput, error)
 }
 
 type libvirtRunner interface {
Changed test stubs (cloudcmd package):

@@ -37,12 +37,16 @@ type stubTerraformClient struct {
 	removeInstallerCalled bool
 	destroyCalled         bool
 	showCalled            bool
-	createClusterErr      error
+	applyClusterErr       error
 	destroyErr            error
 	prepareWorkspaceErr   error
 	cleanUpWorkspaceErr   error
 	iamOutputErr          error
-	showErr               error
+	showInfrastructureErr error
+	showIAMErr            error
+	planDiff              bool
+	planErr               error
+	showPlanErr           error
 }
 
 func (c *stubTerraformClient) ApplyCluster(_ context.Context, _ cloudprovider.Provider, _ terraform.LogLevel) (state.Infrastructure, error) {
@@ -53,7 +57,7 @@ func (c *stubTerraformClient) ApplyCluster(_ context.Context, _ cloudprovider.Pr
 		Azure: &state.Azure{
 			AttestationURL: c.attestationURL,
 		},
-	}, c.createClusterErr
+	}, c.applyClusterErr
 }
 
 func (c *stubTerraformClient) ApplyIAM(_ context.Context, _ cloudprovider.Provider, _ terraform.LogLevel) (terraform.IAMOutput, error) {
@@ -80,12 +84,20 @@ func (c *stubTerraformClient) RemoveInstaller() {
 
 func (c *stubTerraformClient) ShowInfrastructure(_ context.Context, _ cloudprovider.Provider) (state.Infrastructure, error) {
 	c.showCalled = true
-	return c.infraState, c.showErr
+	return c.infraState, c.showInfrastructureErr
 }
 
 func (c *stubTerraformClient) ShowIAM(_ context.Context, _ cloudprovider.Provider) (terraform.IAMOutput, error) {
 	c.showCalled = true
-	return c.iamOutput, c.showErr
+	return c.iamOutput, c.showIAMErr
+}
+
+func (c *stubTerraformClient) Plan(_ context.Context, _ terraform.LogLevel) (bool, error) {
+	return c.planDiff, c.planErr
+}
+
+func (c *stubTerraformClient) ShowPlan(_ context.Context, _ terraform.LogLevel, _ io.Writer) error {
+	return c.showPlanErr
 }
 
 type stubLibvirtRunner struct {
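The interface split above is what lets this single stub stand in for every Terraform client role. As a sketch, one could add compile-time assertions like the following (they are not part of the commit, and they assume the stub's remaining methods, such as Destroy, CleanUpWorkspace, and PrepareWorkspace, which the hunks only imply):

    // stubTerraformClient covers the destroy, plan, cluster, and IAM facets at once.
    var (
        _ tfResourceClient   = (*stubTerraformClient)(nil)
        _ tfIAMClient        = (*stubTerraformClient)(nil)
        _ tfIAMUpgradeClient = (*stubTerraformClient)(nil)
    )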
cli/internal/cloudcmd/clusterupgrade.go (deleted, 88 lines; removed from BUILD.bazel above):

/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package cloudcmd

import (
    "context"
    "fmt"
    "io"
    "path/filepath"
    "strings"

    "github.com/edgelesssys/constellation/v2/cli/internal/state"
    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
    "github.com/edgelesssys/constellation/v2/internal/constants"
    "github.com/edgelesssys/constellation/v2/internal/file"
    "github.com/edgelesssys/constellation/v2/internal/maa"
)

// ClusterUpgrader is responsible for performing Terraform migrations on cluster upgrades.
type ClusterUpgrader struct {
    tf                tfClusterUpgradeClient
    policyPatcher     policyPatcher
    fileHandler       file.Handler
    existingWorkspace string
    upgradeWorkspace  string
    logLevel          terraform.LogLevel
}

// NewClusterUpgrader initializes and returns a new ClusterUpgrader.
// existingWorkspace is the directory holding the existing Terraform resources.
// upgradeWorkspace is the directory to use for holding temporary files and resources required to apply the upgrade.
func NewClusterUpgrader(ctx context.Context, existingWorkspace, upgradeWorkspace string,
    logLevel terraform.LogLevel, fileHandler file.Handler,
) (*ClusterUpgrader, error) {
    tfClient, err := terraform.New(ctx, existingWorkspace)
    if err != nil {
        return nil, fmt.Errorf("setting up terraform client: %w", err)
    }

    return &ClusterUpgrader{
        tf:                tfClient,
        policyPatcher:     maa.NewAzurePolicyPatcher(),
        fileHandler:       fileHandler,
        existingWorkspace: existingWorkspace,
        upgradeWorkspace:  upgradeWorkspace,
        logLevel:          logLevel,
    }, nil
}

// PlanClusterUpgrade prepares the upgrade workspace and plans the possible Terraform migrations for Constellation's cluster resources (Loadbalancers, VMs, networks etc.).
// In case of possible migrations, the diff is written to outWriter and this function returns true.
func (u *ClusterUpgrader) PlanClusterUpgrade(ctx context.Context, outWriter io.Writer, vars terraform.Variables, csp cloudprovider.Provider,
) (bool, error) {
    return planUpgrade(
        ctx, u.tf, u.fileHandler, outWriter, u.logLevel, vars,
        filepath.Join(constants.TerraformEmbeddedDir, strings.ToLower(csp.String())),
        u.existingWorkspace,
        filepath.Join(u.upgradeWorkspace, constants.TerraformUpgradeBackupDir),
    )
}

// RestoreClusterWorkspace rolls back the existing workspace to the backup directory created when planning an upgrade,
// when the user decides to not apply an upgrade after planning it.
// Note that this will not apply the restored state from the backup.
func (u *ClusterUpgrader) RestoreClusterWorkspace() error {
    return restoreBackup(u.fileHandler, u.existingWorkspace, filepath.Join(u.upgradeWorkspace, constants.TerraformUpgradeBackupDir))
}

// ApplyClusterUpgrade applies the Terraform migrations planned by PlanClusterUpgrade.
// On success, the workspace of the Upgrader replaces the existing Terraform workspace.
func (u *ClusterUpgrader) ApplyClusterUpgrade(ctx context.Context, csp cloudprovider.Provider) (state.Infrastructure, error) {
    infraState, err := u.tf.ApplyCluster(ctx, csp, u.logLevel)
    if err != nil {
        return infraState, fmt.Errorf("terraform apply: %w", err)
    }
    if infraState.Azure != nil {
        if err := u.policyPatcher.Patch(ctx, infraState.Azure.AttestationURL); err != nil {
            return infraState, fmt.Errorf("patching policies: %w", err)
        }
    }

    return infraState, nil
}
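The removed ClusterUpgrader's responsibilities are absorbed by the Applier in apply.go above. A rough correspondence, sketched as comments; the rollback behavior a caller picks is an assumption, not shown in this diff:

    // Old (clusterupgrade.go, removed)                          ->  New (apply.go)
    // NewClusterUpgrader(ctx, existingWS, upgradeWS, lvl, fh)   ->  NewApplier(ctx, out, existingWS, upgradeWS, lvl, fh)
    // u.PlanClusterUpgrade(ctx, out, vars, csp)                 ->  a.Plan(ctx, conf)  // Terraform variables are now derived from conf
    // u.ApplyClusterUpgrade(ctx, csp)                           ->  a.Apply(ctx, csp, withRollbackBehavior)
    // u.RestoreClusterWorkspace()                               ->  a.RestoreWorkspace()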
cli/internal/cloudcmd/clusterupgrade_test.go (deleted, 204 lines; removed from BUILD.bazel above):

/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package cloudcmd

import (
    "context"
    "io"
    "path/filepath"
    "testing"

    "github.com/edgelesssys/constellation/v2/cli/internal/state"
    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
    "github.com/edgelesssys/constellation/v2/internal/constants"
    "github.com/edgelesssys/constellation/v2/internal/file"
    "github.com/spf13/afero"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestPlanClusterUpgrade(t *testing.T) {
    setUpFilesystem := func(existingFiles []string) file.Handler {
        fs := file.NewHandler(afero.NewMemMapFs())
        require.NoError(t, fs.MkdirAll("test"))
        for _, f := range existingFiles {
            require.NoError(t, fs.Write(f, []byte{}))
        }
        return fs
    }

    testCases := map[string]struct {
        upgradeID string
        tf        *tfClusterUpgradeStub
        fs        file.Handler
        want      bool
        wantErr   bool
    }{
        "success no diff": {
            upgradeID: "1234",
            tf:        &tfClusterUpgradeStub{},
            fs:        setUpFilesystem([]string{}),
        },
        "success diff": {
            upgradeID: "1234",
            tf: &tfClusterUpgradeStub{
                planDiff: true,
            },
            fs:   setUpFilesystem([]string{}),
            want: true,
        },
        "prepare workspace error": {
            upgradeID: "1234",
            tf: &tfClusterUpgradeStub{
                prepareWorkspaceErr: assert.AnError,
            },
            fs:      setUpFilesystem([]string{}),
            wantErr: true,
        },
        "plan error": {
            tf: &tfClusterUpgradeStub{
                planErr: assert.AnError,
            },
            fs:      setUpFilesystem([]string{}),
            wantErr: true,
        },
        "show plan error no diff": {
            upgradeID: "1234",
            tf: &tfClusterUpgradeStub{
                showErr: assert.AnError,
            },
            fs: setUpFilesystem([]string{}),
        },
        "show plan error diff": {
            upgradeID: "1234",
            tf: &tfClusterUpgradeStub{
                showErr:  assert.AnError,
                planDiff: true,
            },
            fs:      setUpFilesystem([]string{}),
            wantErr: true,
        },
        "workspace not clean": {
            upgradeID: "1234",
            tf:        &tfClusterUpgradeStub{},
            fs:        setUpFilesystem([]string{filepath.Join(constants.UpgradeDir, "1234", constants.TerraformUpgradeBackupDir)}),
            wantErr:   true,
        },
    }

    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            require := require.New(t)

            u := &ClusterUpgrader{
                tf:                tc.tf,
                policyPatcher:     stubPolicyPatcher{},
                fileHandler:       tc.fs,
                upgradeWorkspace:  filepath.Join(constants.UpgradeDir, tc.upgradeID),
                existingWorkspace: "test",
                logLevel:          terraform.LogLevelDebug,
            }

            diff, err := u.PlanClusterUpgrade(context.Background(), io.Discard, &terraform.QEMUVariables{}, cloudprovider.Unknown)
            if tc.wantErr {
                require.Error(err)
            } else {
                require.NoError(err)
                require.Equal(tc.want, diff)
            }
        })
    }
}

func TestApplyClusterUpgrade(t *testing.T) {
    setUpFilesystem := func(upgradeID string, existingFiles ...string) file.Handler {
        fh := file.NewHandler(afero.NewMemMapFs())

        require.NoError(t,
            fh.Write(
                filepath.Join(constants.UpgradeDir, upgradeID, constants.TerraformUpgradeWorkingDir, "someFile"),
                []byte("some content"),
            ))
        for _, f := range existingFiles {
            require.NoError(t, fh.Write(f, []byte("some content")))
        }
        return fh
    }

    testCases := map[string]struct {
        upgradeID     string
        tf            *tfClusterUpgradeStub
        policyPatcher stubPolicyPatcher
        fs            file.Handler
        wantErr       bool
    }{
        "success": {
            upgradeID:     "1234",
            tf:            &tfClusterUpgradeStub{},
            fs:            setUpFilesystem("1234"),
            policyPatcher: stubPolicyPatcher{},
        },
        "apply error": {
            upgradeID: "1234",
            tf: &tfClusterUpgradeStub{
                applyErr: assert.AnError,
            },
            fs:            setUpFilesystem("1234"),
            policyPatcher: stubPolicyPatcher{},
            wantErr:       true,
        },
    }

    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            assert := require.New(t)

            tc.tf.file = tc.fs
            u := &ClusterUpgrader{
                tf:                tc.tf,
                policyPatcher:     stubPolicyPatcher{},
                fileHandler:       tc.fs,
                upgradeWorkspace:  filepath.Join(constants.UpgradeDir, tc.upgradeID),
                existingWorkspace: "test",
                logLevel:          terraform.LogLevelDebug,
            }

            _, err := u.ApplyClusterUpgrade(context.Background(), cloudprovider.Unknown)
            if tc.wantErr {
                assert.Error(err)
            } else {
                assert.NoError(err)
            }
        })
    }
}

type tfClusterUpgradeStub struct {
    file                file.Handler
    applyErr            error
    planErr             error
    planDiff            bool
    showErr             error
    prepareWorkspaceErr error
}

func (t *tfClusterUpgradeStub) Plan(_ context.Context, _ terraform.LogLevel) (bool, error) {
    return t.planDiff, t.planErr
}

func (t *tfClusterUpgradeStub) ShowPlan(_ context.Context, _ terraform.LogLevel, _ io.Writer) error {
    return t.showErr
}

func (t *tfClusterUpgradeStub) ApplyCluster(_ context.Context, _ cloudprovider.Provider, _ terraform.LogLevel) (state.Infrastructure, error) {
    return state.Infrastructure{}, t.applyErr
}

func (t *tfClusterUpgradeStub) PrepareWorkspace(_ string, _ terraform.Variables) error {
    return t.prepareWorkspaceErr
}
cli/internal/cloudcmd/create.go (deleted, 303 lines; removed from BUILD.bazel above):

/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package cloudcmd

import (
    "context"
    "errors"
    "fmt"
    "io"
    "net/url"
    "os"
    "path"
    "regexp"
    "runtime"
    "strings"

    "github.com/edgelesssys/constellation/v2/cli/internal/libvirt"
    "github.com/edgelesssys/constellation/v2/cli/internal/state"
    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
    "github.com/edgelesssys/constellation/v2/internal/config"
    "github.com/edgelesssys/constellation/v2/internal/constants"
    "github.com/edgelesssys/constellation/v2/internal/imagefetcher"
    "github.com/edgelesssys/constellation/v2/internal/maa"
)

// Creator creates cloud resources.
type Creator struct {
    out                io.Writer
    image              imageFetcher
    newTerraformClient func(ctx context.Context, workspace string) (tfResourceClient, error)
    newLibvirtRunner   func() libvirtRunner
    newRawDownloader   func() rawDownloader
    policyPatcher      policyPatcher
}

// NewCreator creates a new creator.
func NewCreator(out io.Writer) *Creator {
    return &Creator{
        out:   out,
        image: imagefetcher.New(),
        newTerraformClient: func(ctx context.Context, workspace string) (tfResourceClient, error) {
            return terraform.New(ctx, workspace)
        },
        newLibvirtRunner: func() libvirtRunner {
            return libvirt.New()
        },
        newRawDownloader: func() rawDownloader {
            return imagefetcher.NewDownloader()
        },
        policyPatcher: maa.NewAzurePolicyPatcher(),
    }
}

// CreateOptions are the options for creating a Constellation cluster.
type CreateOptions struct {
    Provider    cloudprovider.Provider
    Config      *config.Config
    TFWorkspace string
    image       string
    TFLogLevel  terraform.LogLevel
}

// Create creates the handed amount of instances and all the needed resources.
func (c *Creator) Create(ctx context.Context, opts CreateOptions) (state.Infrastructure, error) {
    provider := opts.Config.GetProvider()
    attestationVariant := opts.Config.GetAttestationConfig().GetVariant()
    region := opts.Config.GetRegion()
    image, err := c.image.FetchReference(ctx, provider, attestationVariant, opts.Config.Image, region)
    if err != nil {
        return state.Infrastructure{}, fmt.Errorf("fetching image reference: %w", err)
    }
    opts.image = image

    cl, err := c.newTerraformClient(ctx, opts.TFWorkspace)
    if err != nil {
        return state.Infrastructure{}, err
    }
    defer cl.RemoveInstaller()

    var infraState state.Infrastructure
    switch opts.Provider {
    case cloudprovider.AWS:
        infraState, err = c.createAWS(ctx, cl, opts)
    case cloudprovider.GCP:
        infraState, err = c.createGCP(ctx, cl, opts)
    case cloudprovider.Azure:
        infraState, err = c.createAzure(ctx, cl, opts)
    case cloudprovider.OpenStack:
        infraState, err = c.createOpenStack(ctx, cl, opts)
    case cloudprovider.QEMU:
        if runtime.GOARCH != "amd64" || runtime.GOOS != "linux" {
            return state.Infrastructure{}, fmt.Errorf("creation of a QEMU based Constellation is not supported for %s/%s", runtime.GOOS, runtime.GOARCH)
        }
        lv := c.newLibvirtRunner()
        qemuOpts := qemuCreateOptions{
            source:        image,
            CreateOptions: opts,
        }
        infraState, err = c.createQEMU(ctx, cl, lv, qemuOpts)
    default:
        return state.Infrastructure{}, fmt.Errorf("unsupported cloud provider: %s", opts.Provider)
    }

    if err != nil {
        return state.Infrastructure{}, fmt.Errorf("creating cluster: %w", err)
    }
    return infraState, nil
}

func (c *Creator) createAWS(ctx context.Context, cl tfResourceClient, opts CreateOptions) (tfOutput state.Infrastructure, retErr error) {
    vars := awsTerraformVars(opts.Config, opts.image)

    tfOutput, err := runTerraformCreate(ctx, cl, cloudprovider.AWS, vars, c.out, opts.TFLogLevel)
    if err != nil {
        return state.Infrastructure{}, err
    }

    return tfOutput, nil
}

func (c *Creator) createGCP(ctx context.Context, cl tfResourceClient, opts CreateOptions) (tfOutput state.Infrastructure, retErr error) {
    vars := gcpTerraformVars(opts.Config, opts.image)

    tfOutput, err := runTerraformCreate(ctx, cl, cloudprovider.GCP, vars, c.out, opts.TFLogLevel)
    if err != nil {
        return state.Infrastructure{}, err
    }

    return tfOutput, nil
}

func (c *Creator) createAzure(ctx context.Context, cl tfResourceClient, opts CreateOptions) (tfOutput state.Infrastructure, retErr error) {
    vars := azureTerraformVars(opts.Config, opts.image)

    tfOutput, err := runTerraformCreate(ctx, cl, cloudprovider.Azure, vars, c.out, opts.TFLogLevel)
    if err != nil {
        return state.Infrastructure{}, err
    }

    if vars.GetCreateMAA() {
        // Patch the attestation policy to allow the cluster to boot while having secure boot disabled.
        if tfOutput.Azure == nil {
            return state.Infrastructure{}, errors.New("no Terraform Azure output found")
        }
        if err := c.policyPatcher.Patch(ctx, tfOutput.Azure.AttestationURL); err != nil {
            return state.Infrastructure{}, err
        }
    }

    return tfOutput, nil
}

// policyPatcher interacts with the CSP (currently only applies for Azure) to update the attestation policy.
type policyPatcher interface {
    Patch(ctx context.Context, attestationURL string) error
}

// The azurerm Terraform provider enforces its own convention of case sensitivity for Azure URIs which Azure's API itself does not enforce or, even worse, actually returns.
// Let's go loco with case insensitive Regexp here and fix the user input here to be compliant with this arbitrary design decision.
var (
    caseInsensitiveSubscriptionsRegexp          = regexp.MustCompile(`(?i)\/subscriptions\/`)
    caseInsensitiveResourceGroupRegexp          = regexp.MustCompile(`(?i)\/resourcegroups\/`)
    caseInsensitiveProvidersRegexp              = regexp.MustCompile(`(?i)\/providers\/`)
    caseInsensitiveUserAssignedIdentitiesRegexp = regexp.MustCompile(`(?i)\/userassignedidentities\/`)
    caseInsensitiveMicrosoftManagedIdentity     = regexp.MustCompile(`(?i)\/microsoft.managedidentity\/`)
    caseInsensitiveCommunityGalleriesRegexp     = regexp.MustCompile(`(?i)\/communitygalleries\/`)
    caseInsensitiveImagesRegExp                 = regexp.MustCompile(`(?i)\/images\/`)
    caseInsensitiveVersionsRegExp               = regexp.MustCompile(`(?i)\/versions\/`)
)

func normalizeAzureURIs(vars *terraform.AzureClusterVariables) *terraform.AzureClusterVariables {
    vars.UserAssignedIdentity = caseInsensitiveSubscriptionsRegexp.ReplaceAllString(vars.UserAssignedIdentity, "/subscriptions/")
    vars.UserAssignedIdentity = caseInsensitiveResourceGroupRegexp.ReplaceAllString(vars.UserAssignedIdentity, "/resourceGroups/")
    vars.UserAssignedIdentity = caseInsensitiveProvidersRegexp.ReplaceAllString(vars.UserAssignedIdentity, "/providers/")
    vars.UserAssignedIdentity = caseInsensitiveUserAssignedIdentitiesRegexp.ReplaceAllString(vars.UserAssignedIdentity, "/userAssignedIdentities/")
    vars.UserAssignedIdentity = caseInsensitiveMicrosoftManagedIdentity.ReplaceAllString(vars.UserAssignedIdentity, "/Microsoft.ManagedIdentity/")
    vars.ImageID = caseInsensitiveCommunityGalleriesRegexp.ReplaceAllString(vars.ImageID, "/communityGalleries/")
    vars.ImageID = caseInsensitiveImagesRegExp.ReplaceAllString(vars.ImageID, "/images/")
    vars.ImageID = caseInsensitiveVersionsRegExp.ReplaceAllString(vars.ImageID, "/versions/")

    return vars
}

func (c *Creator) createOpenStack(ctx context.Context, cl tfResourceClient, opts CreateOptions) (infraState state.Infrastructure, retErr error) {
    if os.Getenv("CONSTELLATION_OPENSTACK_DEV") != "1" {
        return state.Infrastructure{}, errors.New("Constellation must be fine-tuned to your OpenStack deployment. Please create an issue or contact Edgeless Systems at https://edgeless.systems/contact/")
    }
    if _, hasOSAuthURL := os.LookupEnv("OS_AUTH_URL"); !hasOSAuthURL && opts.Config.Provider.OpenStack.Cloud == "" {
        return state.Infrastructure{}, errors.New(
            "neither environment variable OS_AUTH_URL nor cloud name for \"clouds.yaml\" is set. OpenStack authentication requires a set of " +
                "OS_* environment variables that are typically sourced into the current shell with an openrc file " +
                "or a cloud name for \"clouds.yaml\". " +
                "See https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html for more information",
        )
    }

    vars := openStackTerraformVars(opts.Config, opts.image)

    infraState, err := runTerraformCreate(ctx, cl, cloudprovider.OpenStack, vars, c.out, opts.TFLogLevel)
    if err != nil {
        return state.Infrastructure{}, err
    }

    return infraState, nil
}

func runTerraformCreate(ctx context.Context, cl tfResourceClient, provider cloudprovider.Provider, vars terraform.Variables, outWriter io.Writer, loglevel terraform.LogLevel) (output state.Infrastructure, retErr error) {
    if err := cl.PrepareWorkspace(path.Join(constants.TerraformEmbeddedDir, strings.ToLower(provider.String())), vars); err != nil {
        return state.Infrastructure{}, err
    }

    defer rollbackOnError(outWriter, &retErr, &rollbackerTerraform{client: cl}, loglevel)
    tfOutput, err := cl.ApplyCluster(ctx, provider, loglevel)
    if err != nil {
        return state.Infrastructure{}, err
    }

    return tfOutput, nil
}

type qemuCreateOptions struct {
    source string
    CreateOptions
}

func (c *Creator) createQEMU(ctx context.Context, cl tfResourceClient, lv libvirtRunner, opts qemuCreateOptions) (tfOutput state.Infrastructure, retErr error) {
    qemuRollbacker := &rollbackerQEMU{client: cl, libvirt: lv}
    defer rollbackOnError(c.out, &retErr, qemuRollbacker, opts.TFLogLevel)

    // TODO(malt3): render progress bar
    downloader := c.newRawDownloader()
    imagePath, err := downloader.Download(ctx, c.out, false, opts.source, opts.Config.Image)
    if err != nil {
        return state.Infrastructure{}, fmt.Errorf("download raw image: %w", err)
    }

    libvirtURI := opts.Config.Provider.QEMU.LibvirtURI
    libvirtSocketPath := "."

    switch {
    // if no libvirt URI is specified, start a libvirt container
    case libvirtURI == "":
        if err := lv.Start(ctx, opts.Config.Name, opts.Config.Provider.QEMU.LibvirtContainerImage); err != nil {
            return state.Infrastructure{}, fmt.Errorf("start libvirt container: %w", err)
        }
        libvirtURI = libvirt.LibvirtTCPConnectURI

    // socket for system URI should be in /var/run/libvirt/libvirt-sock
    case libvirtURI == "qemu:///system":
        libvirtSocketPath = "/var/run/libvirt/libvirt-sock"

    // socket for session URI should be in /run/user/<uid>/libvirt/libvirt-sock
    case libvirtURI == "qemu:///session":
        libvirtSocketPath = fmt.Sprintf("/run/user/%d/libvirt/libvirt-sock", os.Getuid())

    // if a unix socket is specified we need to parse the URI to get the socket path
    case strings.HasPrefix(libvirtURI, "qemu+unix://"):
        unixURI, err := url.Parse(strings.TrimPrefix(libvirtURI, "qemu+unix://"))
        if err != nil {
            return state.Infrastructure{}, err
        }
        libvirtSocketPath = unixURI.Query().Get("socket")
        if libvirtSocketPath == "" {
            return state.Infrastructure{}, fmt.Errorf("socket path not specified in qemu+unix URI: %s", libvirtURI)
        }
    }

    metadataLibvirtURI := libvirtURI
    if libvirtSocketPath != "." {
        metadataLibvirtURI = "qemu:///system"
    }

    vars := qemuTerraformVars(opts.Config, imagePath, libvirtURI, libvirtSocketPath, metadataLibvirtURI)

    if opts.Config.Provider.QEMU.Firmware != "" {
        vars.Firmware = toPtr(opts.Config.Provider.QEMU.Firmware)
    }

    if err := cl.PrepareWorkspace(path.Join(constants.TerraformEmbeddedDir, strings.ToLower(cloudprovider.QEMU.String())), vars); err != nil {
        return state.Infrastructure{}, fmt.Errorf("prepare workspace: %w", err)
    }

    tfOutput, err = cl.ApplyCluster(ctx, opts.Provider, opts.TFLogLevel)
    if err != nil {
        return state.Infrastructure{}, fmt.Errorf("create cluster: %w", err)
    }

    return tfOutput, nil
}

func toPtr[T any](v T) *T {
    return &v
}
@ -1,306 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package cloudcmd

import (
    "bytes"
    "context"
    "errors"
    "runtime"
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
    "github.com/edgelesssys/constellation/v2/internal/config"
)

func TestCreator(t *testing.T) {
    t.Setenv("CONSTELLATION_OPENSTACK_DEV", "1")
    failOnNonAMD64 := (runtime.GOARCH != "amd64") || (runtime.GOOS != "linux")
    ip := "192.0.2.1"
    someErr := errors.New("failed")

    testCases := map[string]struct {
        tfClient              tfResourceClient
        newTfClientErr        error
        libvirt               *stubLibvirtRunner
        provider              cloudprovider.Provider
        config                *config.Config
        policyPatcher         *stubPolicyPatcher
        wantErr               bool
        wantRollback          bool // Use only together with stubClients.
        wantTerraformRollback bool // When libvirt fails, don't call into Terraform.
    }{
        "gcp": {
            tfClient: &stubTerraformClient{ip: ip},
            provider: cloudprovider.GCP,
            config:   config.Default(),
        },
        "gcp newTerraformClient error": {
            newTfClientErr: someErr,
            provider:       cloudprovider.GCP,
            config:         config.Default(),
            wantErr:        true,
        },
        "gcp create cluster error": {
            tfClient:              &stubTerraformClient{createClusterErr: someErr},
            provider:              cloudprovider.GCP,
            config:                config.Default(),
            wantErr:               true,
            wantRollback:          true,
            wantTerraformRollback: true,
        },
        "azure": {
            tfClient: &stubTerraformClient{ip: ip},
            provider: cloudprovider.Azure,
            config: func() *config.Config {
                cfg := config.Default()
                cfg.RemoveProviderAndAttestationExcept(cloudprovider.Azure)
                return cfg
            }(),
            policyPatcher: &stubPolicyPatcher{},
        },
        "azure trusted launch": {
            tfClient: &stubTerraformClient{ip: ip},
            provider: cloudprovider.Azure,
            config: func() *config.Config {
                cfg := config.Default()
                cfg.Attestation = config.AttestationConfig{
                    AzureTrustedLaunch: &config.AzureTrustedLaunch{},
                }
                return cfg
            }(),
            policyPatcher: &stubPolicyPatcher{},
        },
        "azure new policy patch error": {
            tfClient: &stubTerraformClient{ip: ip},
            provider: cloudprovider.Azure,
            config: func() *config.Config {
                cfg := config.Default()
                cfg.RemoveProviderAndAttestationExcept(cloudprovider.Azure)
                return cfg
            }(),
            policyPatcher: &stubPolicyPatcher{someErr},
            wantErr:       true,
        },
        "azure newTerraformClient error": {
            newTfClientErr: someErr,
            provider:       cloudprovider.Azure,
            config: func() *config.Config {
                cfg := config.Default()
                cfg.RemoveProviderAndAttestationExcept(cloudprovider.Azure)
                return cfg
            }(),
            policyPatcher: &stubPolicyPatcher{},
            wantErr:       true,
        },
        "azure create cluster error": {
            tfClient: &stubTerraformClient{createClusterErr: someErr},
            provider: cloudprovider.Azure,
            config: func() *config.Config {
                cfg := config.Default()
                cfg.RemoveProviderAndAttestationExcept(cloudprovider.Azure)
                return cfg
            }(),
            policyPatcher:         &stubPolicyPatcher{},
            wantErr:               true,
            wantRollback:          true,
            wantTerraformRollback: true,
        },
        "openstack": {
            tfClient: &stubTerraformClient{ip: ip},
            libvirt:  &stubLibvirtRunner{},
            provider: cloudprovider.OpenStack,
            config: func() *config.Config {
                cfg := config.Default()
                cfg.Provider.OpenStack.Cloud = "testcloud"
                return cfg
            }(),
        },
        "openstack without clouds.yaml": {
            tfClient: &stubTerraformClient{ip: ip},
            libvirt:  &stubLibvirtRunner{},
            provider: cloudprovider.OpenStack,
            config:   config.Default(),
            wantErr:  true,
        },
        "openstack newTerraformClient error": {
            newTfClientErr: someErr,
            libvirt:        &stubLibvirtRunner{},
            provider:       cloudprovider.OpenStack,
            config: func() *config.Config {
                cfg := config.Default()
                cfg.Provider.OpenStack.Cloud = "testcloud"
                return cfg
            }(),
            wantErr: true,
        },
        "openstack create cluster error": {
            tfClient: &stubTerraformClient{createClusterErr: someErr},
            libvirt:  &stubLibvirtRunner{},
            provider: cloudprovider.OpenStack,
            config: func() *config.Config {
                cfg := config.Default()
                cfg.Provider.OpenStack.Cloud = "testcloud"
                return cfg
            }(),
            wantErr:               true,
            wantRollback:          true,
            wantTerraformRollback: true,
        },
        "qemu": {
            tfClient: &stubTerraformClient{ip: ip},
            libvirt:  &stubLibvirtRunner{},
            provider: cloudprovider.QEMU,
            config:   config.Default(),
            wantErr:  failOnNonAMD64,
        },
        "qemu newTerraformClient error": {
            newTfClientErr: someErr,
            libvirt:        &stubLibvirtRunner{},
            provider:       cloudprovider.QEMU,
            config:         config.Default(),
            wantErr:        true,
        },
        "qemu create cluster error": {
            tfClient:              &stubTerraformClient{createClusterErr: someErr},
            libvirt:               &stubLibvirtRunner{},
            provider:              cloudprovider.QEMU,
            config:                config.Default(),
            wantErr:               true,
            wantRollback:          !failOnNonAMD64, // if we run on non-AMD64/linux, we don't get to a point where rollback is needed
            wantTerraformRollback: true,
        },
        "qemu start libvirt error": {
            tfClient:              &stubTerraformClient{ip: ip},
            libvirt:               &stubLibvirtRunner{startErr: someErr},
            provider:              cloudprovider.QEMU,
            config:                config.Default(),
            wantRollback:          !failOnNonAMD64,
            wantTerraformRollback: false,
            wantErr:               true,
        },
        "unknown provider": {
            tfClient: &stubTerraformClient{},
            provider: cloudprovider.Unknown,
            config:   config.Default(),
            wantErr:  true,
        },
    }

    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            assert := assert.New(t)
            creator := &Creator{
                out: &bytes.Buffer{},
                image: &stubImageFetcher{
                    reference: "some-image",
                },
                newTerraformClient: func(_ context.Context, _ string) (tfResourceClient, error) {
                    return tc.tfClient, tc.newTfClientErr
                },
                newLibvirtRunner: func() libvirtRunner {
                    return tc.libvirt
                },
                newRawDownloader: func() rawDownloader {
                    return &stubRawDownloader{
                        destination: "some-destination",
                    }
                },
                policyPatcher: tc.policyPatcher,
            }

            opts := CreateOptions{
                Provider:   tc.provider,
                Config:     tc.config,
                TFLogLevel: terraform.LogLevelNone,
            }
            idFile, err := creator.Create(context.Background(), opts)

            if tc.wantErr {
                assert.Error(err)
                if tc.wantRollback {
                    cl := tc.tfClient.(*stubTerraformClient)
                    if tc.wantTerraformRollback {
                        assert.True(cl.destroyCalled)
                    }
                    assert.True(cl.cleanUpWorkspaceCalled)
                    if tc.provider == cloudprovider.QEMU {
                        assert.True(tc.libvirt.stopCalled)
                    }
                }
            } else {
                assert.NoError(err)
                assert.Equal(ip, idFile.ClusterEndpoint)
            }
        })
    }
}

type stubPolicyPatcher struct {
    patchErr error
}

func (s stubPolicyPatcher) Patch(_ context.Context, _ string) error {
    return s.patchErr
}

func TestNormalizeAzureURIs(t *testing.T) {
    testCases := map[string]struct {
        in   *terraform.AzureClusterVariables
        want *terraform.AzureClusterVariables
    }{
        "empty": {
            in:   &terraform.AzureClusterVariables{},
            want: &terraform.AzureClusterVariables{},
        },
        "no change": {
            in: &terraform.AzureClusterVariables{
                ImageID: "/communityGalleries/foo/images/constellation/versions/2.1.0",
            },
            want: &terraform.AzureClusterVariables{
                ImageID: "/communityGalleries/foo/images/constellation/versions/2.1.0",
            },
        },
        "fix image id": {
            in: &terraform.AzureClusterVariables{
                ImageID: "/CommunityGalleries/foo/Images/constellation/Versions/2.1.0",
            },
            want: &terraform.AzureClusterVariables{
                ImageID: "/communityGalleries/foo/images/constellation/versions/2.1.0",
            },
        },
        "fix resource group": {
            in: &terraform.AzureClusterVariables{
                UserAssignedIdentity: "/subscriptions/foo/resourcegroups/test/providers/Microsoft.ManagedIdentity/userAssignedIdentities/uai",
            },
            want: &terraform.AzureClusterVariables{
                UserAssignedIdentity: "/subscriptions/foo/resourceGroups/test/providers/Microsoft.ManagedIdentity/userAssignedIdentities/uai",
            },
        },
        "fix arbitrary casing": {
            in: &terraform.AzureClusterVariables{
                ImageID:              "/CoMMUnitygaLLeries/foo/iMAges/constellation/vERsions/2.1.0",
                UserAssignedIdentity: "/subsCRiptions/foo/resoURCegroups/test/proViDers/MICROsoft.mANAgedIdentity/USerASsignediDENtities/uai",
            },
            want: &terraform.AzureClusterVariables{
                ImageID:              "/communityGalleries/foo/images/constellation/versions/2.1.0",
                UserAssignedIdentity: "/subscriptions/foo/resourceGroups/test/providers/Microsoft.ManagedIdentity/userAssignedIdentities/uai",
            },
        },
    }

    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            assert := assert.New(t)
            out := normalizeAzureURIs(tc.in)
            assert.Equal(tc.want, out)
        })
    }
}
@ -234,7 +234,7 @@ func TestGetTfstateServiceAccountKey(t *testing.T) {
            },
        },
        "show error": {
            cl: &stubTerraformClient{
                showErr: assert.AnError,
                showIAMErr: assert.AnError,
            },
            wantErr:        true,
            wantShowCalled: true,
@ -59,7 +59,7 @@ func NewIAMUpgrader(ctx context.Context, existingWorkspace, upgradeWorkspace str
// PlanIAMUpgrade prepares the upgrade workspace and plans the possible Terraform migrations for Constellation's IAM resources (service accounts, permissions etc.).
// In case of possible migrations, the diff is written to outWriter and this function returns true.
func (u *IAMUpgrader) PlanIAMUpgrade(ctx context.Context, outWriter io.Writer, vars terraform.Variables, csp cloudprovider.Provider) (bool, error) {
    return planUpgrade(
    return plan(
        ctx, u.tf, u.fileHandler, outWriter, u.logLevel, vars,
        filepath.Join(constants.TerraformEmbeddedDir, "iam", strings.ToLower(csp.String())),
        u.existingWorkspace,
@ -37,7 +37,7 @@ func rollbackOnError(w io.Writer, onErr *error, roll rollbacker, logLevel terraf
}

type rollbackerTerraform struct {
    client tfCommonClient
    client tfDestroyer
}

func (r *rollbackerTerraform) rollback(ctx context.Context, w io.Writer, logLevel terraform.LogLevel) error {
@ -50,7 +50,7 @@ func (r *rollbackerTerraform) rollback(ctx context.Context, w io.Writer, logLeve
}

type rollbackerQEMU struct {
    client  tfResourceClient
    client  tfDestroyer
    libvirt libvirtRunner
}

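The rollbacker types above plug into a deferred rollback-on-error helper. Below is a minimal, self-contained sketch of that pattern, assuming a trimmed-down rollbacker interface without the io.Writer and Terraform log-level arguments the real helper takes; runCreate and the method names are illustrative only.

package sketch

import (
    "context"
    "errors"
    "fmt"
)

type rollbacker interface {
    rollback(ctx context.Context) error
}

// rollbackOnError inspects the caller's named return error and, if it is set,
// runs the rollback and attaches any rollback failure to the original error.
func rollbackOnError(ctx context.Context, onErr *error, roll rollbacker) {
    if *onErr == nil {
        return
    }
    if rbErr := roll.rollback(ctx); rbErr != nil {
        *onErr = errors.Join(*onErr, fmt.Errorf("rollback failed: %w", rbErr))
    }
}

func runCreate(ctx context.Context, roll rollbacker) (retErr error) {
    defer rollbackOnError(ctx, &retErr, roll)
    // ... create resources; any error returned here triggers the deferred rollback.
    return nil
}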
@ -15,14 +15,14 @@ import (

// Terminator deletes cloud provider resources.
type Terminator struct {
    newTerraformClient func(ctx context.Context, tfWorkspace string) (tfResourceClient, error)
    newTerraformClient func(ctx context.Context, tfWorkspace string) (tfDestroyer, error)
    newLibvirtRunner   func() libvirtRunner
}

// NewTerminator create a new cloud terminator.
func NewTerminator() *Terminator {
    return &Terminator{
        newTerraformClient: func(ctx context.Context, tfWorkspace string) (tfResourceClient, error) {
        newTerraformClient: func(ctx context.Context, tfWorkspace string) (tfDestroyer, error) {
            return terraform.New(ctx, tfWorkspace)
        },
        newLibvirtRunner: func() libvirtRunner {
@ -48,7 +48,7 @@ func (t *Terminator) Terminate(ctx context.Context, tfWorkspace string, logLevel
    return t.terminateTerraform(ctx, cl, logLevel)
}

func (t *Terminator) terminateTerraform(ctx context.Context, cl tfResourceClient, logLevel terraform.LogLevel) error {
func (t *Terminator) terminateTerraform(ctx context.Context, cl tfDestroyer, logLevel terraform.LogLevel) error {
    if err := cl.Destroy(ctx, logLevel); err != nil {
        return err
    }
@ -19,7 +19,7 @@ func TestTerminator(t *testing.T) {
    someErr := errors.New("failed")

    testCases := map[string]struct {
        tfClient       tfResourceClient
        tfClient       tfDestroyer
        newTfClientErr error
        libvirt        *stubLibvirtRunner
        wantErr        bool
@ -55,7 +55,7 @@ func TestTerminator(t *testing.T) {
            assert := assert.New(t)

            terminator := &Terminator{
                newTerraformClient: func(_ context.Context, _ string) (tfResourceClient, error) {
                newTerraformClient: func(_ context.Context, _ string) (tfDestroyer, error) {
                    return tc.tfClient, tc.newTfClientErr
                },
                newLibvirtRunner: func() libvirtRunner {
@ -8,6 +8,7 @@ package cloudcmd

import (
    "context"
    "errors"
    "fmt"
    "io"
    "os"
@ -16,20 +17,30 @@ import (
    "github.com/edgelesssys/constellation/v2/internal/file"
)

// planUpgrade prepares a workspace and plans the possible Terraform migrations.
// plan prepares a workspace and plans the possible Terraform actions.
// This will either create a new workspace or update an existing one.
// In case of possible migrations, the diff is written to outWriter and this function returns true.
func planUpgrade(
func plan(
    ctx context.Context, tfClient tfUpgradePlanner, fileHandler file.Handler,
    ctx context.Context, tfClient tfPlanner, fileHandler file.Handler,
    outWriter io.Writer, logLevel terraform.LogLevel, vars terraform.Variables,
    templateDir, existingWorkspace, backupDir string,
) (bool, error) {
    if err := ensureFileNotExist(fileHandler, backupDir); err != nil {
        return false, fmt.Errorf("backup directory %s already exists: %w", backupDir, err)
    isNewWorkspace, err := fileHandler.IsEmpty(existingWorkspace)
    if err != nil {
        if !errors.Is(err, os.ErrNotExist) {
            return false, fmt.Errorf("checking if workspace is empty: %w", err)
        }
        isNewWorkspace = true
    }

    // Backup old workspace
    if err := fileHandler.CopyDir(existingWorkspace, backupDir); err != nil {
        return false, fmt.Errorf("backing up old workspace: %w", err)
    // Backup old workspace if it exists
    if !isNewWorkspace {
        if err := ensureFileNotExist(fileHandler, backupDir); err != nil {
            return false, fmt.Errorf("backup directory %s already exists: %w", backupDir, err)
        }
        if err := fileHandler.CopyDir(existingWorkspace, backupDir); err != nil {
            return false, fmt.Errorf("backing up old workspace: %w", err)
        }
    }

    // Move the new embedded Terraform files into the workspace.
@ -42,12 +53,16 @@ func planUpgrade(
        return false, fmt.Errorf("terraform plan: %w", err)
    }

    // If we are planning in a new workspace, we don't want to show a diff
    if isNewWorkspace {
        return false, nil
    }

    if hasDiff {
        if err := tfClient.ShowPlan(ctx, logLevel, outWriter); err != nil {
            return false, fmt.Errorf("terraform show plan: %w", err)
        }
    }

    return hasDiff, nil
}
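A compressed sketch of the backup-then-plan control flow that the new plan function implements, written against the standard library instead of the project's file.Handler and Terraform client; planWithBackup, backupFn, and planFn are hypothetical names.

package sketch

import (
    "errors"
    "fmt"
    "os"
)

// planWithBackup backs up a non-empty workspace before planning so a rejected
// plan can be rolled back, and skips both the backup and the diff for a fresh
// (missing or empty) workspace.
func planWithBackup(workspace, backupDir string, backupFn func(src, dst string) error, planFn func() (bool, error)) (bool, error) {
    entries, err := os.ReadDir(workspace)
    if err != nil && !errors.Is(err, os.ErrNotExist) {
        return false, fmt.Errorf("checking workspace: %w", err)
    }
    isNew := errors.Is(err, os.ErrNotExist) || len(entries) == 0

    if !isNew {
        if _, err := os.Stat(backupDir); err == nil {
            return false, fmt.Errorf("backup directory %s already exists", backupDir)
        }
        if err := backupFn(workspace, backupDir); err != nil {
            return false, fmt.Errorf("backing up workspace: %w", err)
        }
    }

    hasDiff, err := planFn()
    if err != nil {
        return false, fmt.Errorf("terraform plan: %w", err)
    }
    if isNew {
        // A plan against an empty workspace is a full create; there is no diff worth showing.
        return false, nil
    }
    return hasDiff, nil
}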
@ -19,7 +19,7 @@ import (
    "github.com/stretchr/testify/require"
)

func TestPlanUpgrade(t *testing.T) {
func TestTFPlan(t *testing.T) {
    const (
        templateDir       = "templateDir"
        existingWorkspace = "existing"
@ -28,36 +28,37 @@ func TestPlanUpgrade(t *testing.T) {
    )
    fsWithWorkspace := func(require *require.Assertions) file.Handler {
        fs := file.NewHandler(afero.NewMemMapFs())
        require.NoError(fs.MkdirAll(existingWorkspace))
        require.NoError(fs.Write(filepath.Join(existingWorkspace, testFile), []byte{}))
        require.NoError(fs.Write(filepath.Join(existingWorkspace, testFile), []byte{}, file.OptMkdirAll))
        return fs
    }

    testCases := map[string]struct {
        prepareFs  func(require *require.Assertions) file.Handler
        tf         *stubUpgradePlanner
        wantDiff   bool
        wantErr    bool
        wantBackup bool
        wantErr    bool
    }{
        "success no diff": {
            prepareFs:  fsWithWorkspace,
            tf:         &stubUpgradePlanner{},
            wantBackup: true,
        },
        "success diff": {
            prepareFs: fsWithWorkspace,
            tf: &stubUpgradePlanner{
                planDiff: true,
            },
            wantDiff:   true,
            wantBackup: true,
        },
        "workspace does not exist": {
        "workspace is empty": {
            prepareFs: func(require *require.Assertions) file.Handler {
                return file.NewHandler(afero.NewMemMapFs())
            },
            tf:      &stubUpgradePlanner{},
            wantErr: true,
        },
        "workspace not clean": {
        "backup dir already exists": {
            prepareFs: func(require *require.Assertions) file.Handler {
                fs := fsWithWorkspace(require)
                require.NoError(fs.MkdirAll(backupDir))
@ -71,14 +72,16 @@ func TestPlanUpgrade(t *testing.T) {
            tf: &stubUpgradePlanner{
                prepareWorkspaceErr: assert.AnError,
            },
            wantErr:    true,
            wantBackup: true,
            wantErr:    true,
        },
        "plan error": {
            prepareFs: fsWithWorkspace,
            tf: &stubUpgradePlanner{
                planErr: assert.AnError,
            },
            wantErr:    true,
            wantBackup: true,
        },
        "show plan error": {
            prepareFs: fsWithWorkspace,
@ -86,7 +89,8 @@ func TestPlanUpgrade(t *testing.T) {
                planDiff:    true,
                showPlanErr: assert.AnError,
            },
            wantErr:    true,
            wantBackup: true,
        },
    }

@ -95,19 +99,23 @@ func TestPlanUpgrade(t *testing.T) {
            assert := assert.New(t)
            fs := tc.prepareFs(require.New(t))

            hasDiff, err := planUpgrade(
            hasDiff, planErr := plan(
                context.Background(), tc.tf, fs, io.Discard, terraform.LogLevelDebug,
                &terraform.QEMUVariables{},
                templateDir, existingWorkspace, backupDir,
            )

            if tc.wantBackup {
                _, err := fs.Stat(filepath.Join(backupDir, testFile))
                assert.NoError(err)
            }

            if tc.wantErr {
                assert.Error(err)
                assert.Error(planErr)
                return
            }
            assert.NoError(err)
            assert.NoError(planErr)
            assert.Equal(tc.wantDiff, hasDiff)
            _, err = fs.Stat(filepath.Join(backupDir, testFile))
            assert.NoError(err)
        })
    }
}
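The updated test asserts that the backup directory was populated whenever wantBackup is set. A minimal sketch of that assertion against afero's in-memory filesystem, with made-up path names:

package sketch

import (
    "path/filepath"
    "testing"

    "github.com/spf13/afero"
    "github.com/stretchr/testify/assert"
)

func TestBackupWritten(t *testing.T) {
    fs := afero.NewMemMapFs()
    // Simulate the backup that plan() writes for an existing workspace.
    assert.NoError(t, afero.WriteFile(fs, filepath.Join("backup", "testfile"), []byte{}, 0o644))

    // The assertion from the table-driven test: the backed-up file must exist.
    _, err := fs.Stat(filepath.Join("backup", "testfile"))
    assert.NoError(t, err)
}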
@ -7,10 +7,17 @@ SPDX-License-Identifier: AGPL-3.0-only
package cloudcmd

import (
    "context"
    "errors"
    "fmt"
    "net/url"
    "os"
    "path/filepath"
    "regexp"
    "runtime"
    "strings"

    "github.com/edgelesssys/constellation/v2/cli/internal/libvirt"
    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
@ -20,27 +27,18 @@ import (
    "github.com/edgelesssys/constellation/v2/internal/role"
)

// TerraformUpgradeVars returns variables required to execute the Terraform scripts.
func TerraformUpgradeVars(conf *config.Config) (terraform.Variables, error) {
    // Note that we don't pass any real image as imageRef, as we ignore changes to the image in the terraform.
    // The image is updates via our operator.
    // Still, the terraform variable verification must accept the values.
    // For AWS, we enforce some basic constraints on the image variable.
    // For Azure, the provider enforces the format below.
    // For GCP, any placeholder works.
    var vars terraform.Variables
    switch conf.GetProvider() {
    case cloudprovider.AWS:
        vars = awsTerraformVars(conf, "ami-placeholder")
    case cloudprovider.Azure:
        vars = azureTerraformVars(conf, "/communityGalleries/myGalleryName/images/myImageName/versions/latest")
    case cloudprovider.GCP:
        vars = gcpTerraformVars(conf, "placeholder")
    default:
        return nil, fmt.Errorf("unsupported provider: %s", conf.GetProvider())
    }
    return vars, nil
}
// The azurerm Terraform provider enforces its own convention of case sensitivity for Azure URIs which Azure's API itself does not enforce or, even worse, actually returns.
// These regular expression are used to make sure that the URIs we pass to Terraform are in the format that the provider expects.
var (
    caseInsensitiveSubscriptionsRegexp          = regexp.MustCompile(`(?i)\/subscriptions\/`)
    caseInsensitiveResourceGroupRegexp          = regexp.MustCompile(`(?i)\/resourcegroups\/`)
    caseInsensitiveProvidersRegexp              = regexp.MustCompile(`(?i)\/providers\/`)
    caseInsensitiveUserAssignedIdentitiesRegexp = regexp.MustCompile(`(?i)\/userassignedidentities\/`)
    caseInsensitiveMicrosoftManagedIdentity     = regexp.MustCompile(`(?i)\/microsoft.managedidentity\/`)
    caseInsensitiveCommunityGalleriesRegexp     = regexp.MustCompile(`(?i)\/communitygalleries\/`)
    caseInsensitiveImagesRegExp                 = regexp.MustCompile(`(?i)\/images\/`)
    caseInsensitiveVersionsRegExp               = regexp.MustCompile(`(?i)\/versions\/`)
)

// TerraformIAMUpgradeVars returns variables required to execute IAM upgrades with Terraform.
func TerraformIAMUpgradeVars(conf *config.Config, fileHandler file.Handler) (terraform.Variables, error) {
@ -114,6 +112,19 @@ func awsTerraformIAMVars(conf *config.Config, oldVars terraform.AWSIAMVariables)
    }
}

func normalizeAzureURIs(vars *terraform.AzureClusterVariables) *terraform.AzureClusterVariables {
    vars.UserAssignedIdentity = caseInsensitiveSubscriptionsRegexp.ReplaceAllString(vars.UserAssignedIdentity, "/subscriptions/")
    vars.UserAssignedIdentity = caseInsensitiveResourceGroupRegexp.ReplaceAllString(vars.UserAssignedIdentity, "/resourceGroups/")
    vars.UserAssignedIdentity = caseInsensitiveProvidersRegexp.ReplaceAllString(vars.UserAssignedIdentity, "/providers/")
    vars.UserAssignedIdentity = caseInsensitiveUserAssignedIdentitiesRegexp.ReplaceAllString(vars.UserAssignedIdentity, "/userAssignedIdentities/")
    vars.UserAssignedIdentity = caseInsensitiveMicrosoftManagedIdentity.ReplaceAllString(vars.UserAssignedIdentity, "/Microsoft.ManagedIdentity/")
    vars.ImageID = caseInsensitiveCommunityGalleriesRegexp.ReplaceAllString(vars.ImageID, "/communityGalleries/")
    vars.ImageID = caseInsensitiveImagesRegExp.ReplaceAllString(vars.ImageID, "/images/")
    vars.ImageID = caseInsensitiveVersionsRegExp.ReplaceAllString(vars.ImageID, "/versions/")

    return vars
}

// azureTerraformVars provides variables required to execute the Terraform scripts.
// It should be the only place to declare the Azure variables.
func azureTerraformVars(conf *config.Config, imageRef string) *terraform.AzureClusterVariables {
@ -197,7 +208,19 @@ func gcpTerraformIAMVars(conf *config.Config, oldVars terraform.GCPIAMVariables)

// openStackTerraformVars provides variables required to execute the Terraform scripts.
// It should be the only place to declare the OpenStack variables.
func openStackTerraformVars(conf *config.Config, imageRef string) *terraform.OpenStackClusterVariables {
func openStackTerraformVars(conf *config.Config, imageRef string) (*terraform.OpenStackClusterVariables, error) {
    if os.Getenv("CONSTELLATION_OPENSTACK_DEV") != "1" {
        return nil, errors.New("Constellation must be fine-tuned to your OpenStack deployment. Please create an issue or contact Edgeless Systems at https://edgeless.systems/contact/")
    }
    if _, hasOSAuthURL := os.LookupEnv("OS_AUTH_URL"); !hasOSAuthURL && conf.Provider.OpenStack.Cloud == "" {
        return nil, errors.New(
            "neither environment variable OS_AUTH_URL nor cloud name for \"clouds.yaml\" is set. OpenStack authentication requires a set of " +
                "OS_* environment variables that are typically sourced into the current shell with an openrc file " +
                "or a cloud name for \"clouds.yaml\". " +
                "See https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html for more information",
        )
    }

    nodeGroups := make(map[string]terraform.OpenStackNodeGroup)
    for groupName, group := range conf.NodeGroups {
        nodeGroups[groupName] = terraform.OpenStackNodeGroup{
@ -222,12 +245,65 @@ func openStackTerraformVars(conf *config.Config, imageRef string) *terraform.Ope
        NodeGroups:           nodeGroups,
        CustomEndpoint:       conf.CustomEndpoint,
        InternalLoadBalancer: conf.InternalLoadBalancer,
    }
    }, nil
}

// qemuTerraformVars provides variables required to execute the Terraform scripts.
// It should be the only place to declare the QEMU variables.
func qemuTerraformVars(conf *config.Config, imageRef string, libvirtURI, libvirtSocketPath, metadataLibvirtURI string) *terraform.QEMUVariables {
func qemuTerraformVars(
    ctx context.Context, conf *config.Config, imageRef string,
    lv libvirtRunner, downloader rawDownloader,
) (*terraform.QEMUVariables, error) {
    if runtime.GOARCH != "amd64" || runtime.GOOS != "linux" {
        return nil, fmt.Errorf("creation of a QEMU based Constellation is not supported for %s/%s", runtime.GOOS, runtime.GOARCH)
    }

    imagePath, err := downloader.Download(ctx, nil, false, imageRef, conf.Image)
    if err != nil {
        return nil, fmt.Errorf("download raw image: %w", err)
    }

    libvirtURI := conf.Provider.QEMU.LibvirtURI
    libvirtSocketPath := "."

    switch {
    // if no libvirt URI is specified, start a libvirt container
    case libvirtURI == "":
        if err := lv.Start(ctx, conf.Name, conf.Provider.QEMU.LibvirtContainerImage); err != nil {
            return nil, fmt.Errorf("start libvirt container: %w", err)
        }
        libvirtURI = libvirt.LibvirtTCPConnectURI

    // socket for system URI should be in /var/run/libvirt/libvirt-sock
    case libvirtURI == "qemu:///system":
        libvirtSocketPath = "/var/run/libvirt/libvirt-sock"

    // socket for session URI should be in /run/user/<uid>/libvirt/libvirt-sock
    case libvirtURI == "qemu:///session":
        libvirtSocketPath = fmt.Sprintf("/run/user/%d/libvirt/libvirt-sock", os.Getuid())

    // if a unix socket is specified we need to parse the URI to get the socket path
    case strings.HasPrefix(libvirtURI, "qemu+unix://"):
        unixURI, err := url.Parse(strings.TrimPrefix(libvirtURI, "qemu+unix://"))
        if err != nil {
            return nil, err
        }
        libvirtSocketPath = unixURI.Query().Get("socket")
        if libvirtSocketPath == "" {
            return nil, fmt.Errorf("socket path not specified in qemu+unix URI: %s", libvirtURI)
        }
    }

    metadataLibvirtURI := libvirtURI
    if libvirtSocketPath != "." {
        metadataLibvirtURI = "qemu:///system"
    }

    var firmware *string
    if conf.Provider.QEMU.Firmware != "" {
        firmware = &conf.Provider.QEMU.Firmware
    }

    nodeGroups := make(map[string]terraform.QEMUNodeGroup)
    for groupName, group := range conf.NodeGroups {
        nodeGroups[groupName] = terraform.QEMUNodeGroup{
@ -245,17 +321,22 @@ func qemuTerraformVars(conf *config.Config, imageRef string, libvirt
        // TODO(malt3): auto select boot mode based on attestation variant.
        // requires image info v2.
        BootMode:    "uefi",
        ImagePath:   imageRef,
        ImagePath:   imagePath,
        ImageFormat: conf.Provider.QEMU.ImageFormat,
        NodeGroups:  nodeGroups,
        Machine:     "q35", // TODO(elchead): make configurable AB#3225
        MetadataAPIImage:   conf.Provider.QEMU.MetadataAPIImage,
        MetadataLibvirtURI: metadataLibvirtURI,
        NVRAM:              conf.Provider.QEMU.NVRAM,
        Firmware:           firmware,
        // TODO(malt3) enable once we have a way to auto-select values for these
        // requires image info v2.
        // BzImagePath: placeholder,
        // InitrdPath: placeholder,
        // KernelCmdline: placeholder,
    }
    }, nil
}

func toPtr[T any](v T) *T {
    return &v
}
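For the qemu+unix:// case above, the socket path is recovered from the URI's query string. A standalone sketch of just that parsing step, with a hypothetical helper name:

package sketch

import (
    "fmt"
    "net/url"
    "strings"
)

// socketPathFromLibvirtURI extracts the unix socket path from a
// qemu+unix:// libvirt connection URI, e.g.
// qemu+unix:///system?socket=/var/run/libvirt/libvirt-sock.
func socketPathFromLibvirtURI(libvirtURI string) (string, error) {
    if !strings.HasPrefix(libvirtURI, "qemu+unix://") {
        return "", fmt.Errorf("not a qemu+unix URI: %s", libvirtURI)
    }
    u, err := url.Parse(strings.TrimPrefix(libvirtURI, "qemu+unix://"))
    if err != nil {
        return "", err
    }
    socket := u.Query().Get("socket")
    if socket == "" {
        return "", fmt.Errorf("socket path not specified in qemu+unix URI: %s", libvirtURI)
    }
    return socket, nil
}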
68
cli/internal/cloudcmd/tfvars_test.go
Normal file
@ -0,0 +1,68 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package cloudcmd

import (
    "testing"

    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/stretchr/testify/assert"
)

func TestNormalizeAzureURIs(t *testing.T) {
    testCases := map[string]struct {
        in   *terraform.AzureClusterVariables
        want *terraform.AzureClusterVariables
    }{
        "empty": {
            in:   &terraform.AzureClusterVariables{},
            want: &terraform.AzureClusterVariables{},
        },
        "no change": {
            in: &terraform.AzureClusterVariables{
                ImageID: "/communityGalleries/foo/images/constellation/versions/2.1.0",
            },
            want: &terraform.AzureClusterVariables{
                ImageID: "/communityGalleries/foo/images/constellation/versions/2.1.0",
            },
        },
        "fix image id": {
            in: &terraform.AzureClusterVariables{
                ImageID: "/CommunityGalleries/foo/Images/constellation/Versions/2.1.0",
            },
            want: &terraform.AzureClusterVariables{
                ImageID: "/communityGalleries/foo/images/constellation/versions/2.1.0",
            },
        },
        "fix resource group": {
            in: &terraform.AzureClusterVariables{
                UserAssignedIdentity: "/subscriptions/foo/resourcegroups/test/providers/Microsoft.ManagedIdentity/userAssignedIdentities/uai",
            },
            want: &terraform.AzureClusterVariables{
                UserAssignedIdentity: "/subscriptions/foo/resourceGroups/test/providers/Microsoft.ManagedIdentity/userAssignedIdentities/uai",
            },
        },
        "fix arbitrary casing": {
            in: &terraform.AzureClusterVariables{
                ImageID:              "/CoMMUnitygaLLeries/foo/iMAges/constellation/vERsions/2.1.0",
                UserAssignedIdentity: "/subsCRiptions/foo/resoURCegroups/test/proViDers/MICROsoft.mANAgedIdentity/USerASsignediDENtities/uai",
            },
            want: &terraform.AzureClusterVariables{
                ImageID:              "/communityGalleries/foo/images/constellation/versions/2.1.0",
                UserAssignedIdentity: "/subscriptions/foo/resourceGroups/test/providers/Microsoft.ManagedIdentity/userAssignedIdentities/uai",
            },
        },
    }

    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            assert := assert.New(t)
            out := normalizeAzureURIs(tc.in)
            assert.Equal(tc.want, out)
        })
    }
}
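The normalization this test exercises boils down to case-insensitive regexp replacement of a few fixed URI segments. A minimal sketch using only the standard library; normalizeImageID is an illustrative name and covers just the ImageID segments:

package sketch

import "regexp"

var (
    communityGalleries = regexp.MustCompile(`(?i)/communitygalleries/`)
    images             = regexp.MustCompile(`(?i)/images/`)
    versions           = regexp.MustCompile(`(?i)/versions/`)
)

// normalizeImageID rewrites the three URI segments the azurerm provider is
// picky about into their expected casing, leaving everything else untouched.
func normalizeImageID(imageID string) string {
    imageID = communityGalleries.ReplaceAllString(imageID, "/communityGalleries/")
    imageID = images.ReplaceAllString(imageID, "/images/")
    imageID = versions.ReplaceAllString(imageID, "/versions/")
    return imageID
}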
@ -221,9 +221,10 @@ func runApply(cmd *cobra.Command, _ []string) error {
    upgradeID := generateUpgradeID(upgradeCmdKindApply)
    upgradeDir := filepath.Join(constants.UpgradeDir, upgradeID)

    newClusterApplier := func(ctx context.Context) (clusterUpgrader, error) {
        return cloudcmd.NewClusterUpgrader(
    newInfraApplier := func(ctx context.Context) (cloudApplier, func(), error) {
        return cloudcmd.NewApplier(
            ctx,
            spinner,
            constants.TerraformWorkingDir,
            upgradeDir,
            flags.tfLogLevel,
@ -232,16 +233,16 @@ func runApply(cmd *cobra.Command, _ []string) error {
    }

    apply := &applyCmd{
        fileHandler:     fileHandler,
        flags:           flags,
        log:             log,
        spinner:         spinner,
        merger:          &kubeconfigMerger{log: log},
        quotaChecker:    license.NewClient(),
        newHelmClient:   newHelmClient,
        newDialer:       newDialer,
        newKubeUpgrader: newKubeUpgrader,
        newClusterApplier: newClusterApplier,
        newInfraApplier: newInfraApplier,
    }

    ctx, cancel := context.WithTimeout(cmd.Context(), time.Hour)
@ -261,10 +262,10 @@ type applyCmd struct {
    merger       configMerger
    quotaChecker license.QuotaChecker

    newHelmClient   func(kubeConfigPath string, log debugLog) (helmApplier, error)
    newDialer       func(validator atls.Validator) *dialer.Dialer
    newKubeUpgrader func(out io.Writer, kubeConfigPath string, log debugLog) (kubernetesUpgrader, error)
    newClusterApplier func(context.Context) (clusterUpgrader, error)
    newInfraApplier func(context.Context) (cloudApplier, func(), error)
}

/*
@ -20,10 +20,11 @@ import (
// runTerraformApply checks if changes to Terraform are required and applies them.
func (a *applyCmd) runTerraformApply(cmd *cobra.Command, conf *config.Config, stateFile *state.State, upgradeDir string) error {
    a.log.Debugf("Checking if Terraform migrations are required")
    terraformClient, err := a.newClusterApplier(cmd.Context())
    terraformClient, removeInstaller, err := a.newInfraApplier(cmd.Context())
    if err != nil {
        return fmt.Errorf("creating Terraform client: %w", err)
    }
    defer removeInstaller()

    migrationRequired, err := a.planTerraformMigration(cmd, conf, terraformClient)
    if err != nil {
@ -58,13 +59,8 @@ func (a *applyCmd) runTerraformApply(cmd *cobra.Command, conf *config.Config, st
}

// planTerraformMigration checks if the Constellation version the cluster is being upgraded to requires a migration.
func (a *applyCmd) planTerraformMigration(cmd *cobra.Command, conf *config.Config, terraformClient clusterUpgrader) (bool, error) {
func (a *applyCmd) planTerraformMigration(cmd *cobra.Command, conf *config.Config, terraformClient cloudApplier) (bool, error) {
    a.log.Debugf("Planning Terraform migrations")
    vars, err := cloudcmd.TerraformUpgradeVars(conf)
    if err != nil {
        return false, fmt.Errorf("parsing upgrade variables: %w", err)
    }
    a.log.Debugf("Using Terraform variables:\n%+v", vars)

    // Check if there are any Terraform migrations to apply

@ -73,16 +69,16 @@ func (a *applyCmd) planTerraformMigration(cmd *cobra.Command, conf *config.Confi
    // var manualMigrations []terraform.StateMigration
    // for _, migration := range manualMigrations {
    //   u.log.Debugf("Adding manual Terraform migration: %s", migration.DisplayName)
    //   u.upgrader.AddManualStateMigration(migration)
    //   u.infraApplier.AddManualStateMigration(migration)
    // }

    a.spinner.Start("Checking for infrastructure changes", false)
    defer a.spinner.Stop()
    return terraformClient.PlanClusterUpgrade(cmd.Context(), a.spinner, vars, conf.GetProvider())
    return terraformClient.Plan(cmd.Context(), conf)
}

// migrateTerraform migrates an existing Terraform state and the post-migration infrastructure state is returned.
func (a *applyCmd) migrateTerraform(cmd *cobra.Command, conf *config.Config, terraformClient clusterUpgrader, upgradeDir string) (state.Infrastructure, error) {
func (a *applyCmd) migrateTerraform(cmd *cobra.Command, conf *config.Config, terraformClient cloudApplier, upgradeDir string) (state.Infrastructure, error) {
    // Ask for confirmation first
    cmd.Println("The upgrade requires a migration of Constellation cloud resources by applying an updated Terraform template.")
    if !a.flags.yes {
@ -94,7 +90,7 @@ func (a *applyCmd) migrateTerraform(cmd *cobra.Command, conf *config.Config, ter
            cmd.Println("Aborting upgrade.")
            // User doesn't expect to see any changes in his workspace after aborting an "upgrade apply",
            // therefore, roll back to the backed up state.
            if err := terraformClient.RestoreClusterWorkspace(); err != nil {
            if err := terraformClient.RestoreWorkspace(); err != nil {
                return state.Infrastructure{}, fmt.Errorf(
                    "restoring Terraform workspace: %w, restore the Terraform workspace manually from %s ",
                    err,
@ -107,7 +103,7 @@ func (a *applyCmd) migrateTerraform(cmd *cobra.Command, conf *config.Config, ter
    a.log.Debugf("Applying Terraform migrations")

    a.spinner.Start("Migrating Terraform resources", false)
    infraState, err := terraformClient.ApplyClusterUpgrade(cmd.Context(), conf.GetProvider())
    infraState, err := terraformClient.Apply(cmd.Context(), conf.GetProvider(), cloudcmd.WithoutRollbackOnError)
    a.spinner.Stop()
    if err != nil {
        return state.Infrastructure{}, fmt.Errorf("applying terraform migrations: %w", err)
@ -14,13 +14,13 @@ import (
    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
    "github.com/edgelesssys/constellation/v2/internal/cloud/gcpshared"
    "github.com/edgelesssys/constellation/v2/internal/config"
)

type cloudCreator interface {
    Create(
        ctx context.Context,
        opts cloudcmd.CreateOptions,
    ) (state.Infrastructure, error)
type cloudApplier interface {
    Plan(ctx context.Context, conf *config.Config) (bool, error)
    Apply(ctx context.Context, csp cloudprovider.Provider, rollback cloudcmd.RollbackBehavior) (state.Infrastructure, error)
    RestoreWorkspace() error
}

type cloudIAMCreator interface {
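A sketch of how a caller might drive a Plan/Apply/RestoreWorkspace contract like the one above, using a trimmed stand-in interface without the provider and rollback arguments; all names here are illustrative:

package sketch

import (
    "context"
    "fmt"
)

type infraApplier interface {
    Plan(ctx context.Context) (changesPending bool, err error)
    Apply(ctx context.Context) error
    RestoreWorkspace() error
}

// planAndApply plans first, lets the caller veto pending changes, and restores
// the backed-up workspace if the plan is rejected.
func planAndApply(ctx context.Context, a infraApplier, confirmed bool) error {
    changes, err := a.Plan(ctx)
    if err != nil {
        return fmt.Errorf("planning changes: %w", err)
    }
    if changes && !confirmed {
        if err := a.RestoreWorkspace(); err != nil {
            return fmt.Errorf("restoring workspace: %w", err)
        }
        return nil
    }
    return a.Apply(ctx)
}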
@ -15,6 +15,7 @@ import (
    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
    "github.com/edgelesssys/constellation/v2/internal/cloud/gcpshared"
    "github.com/edgelesssys/constellation/v2/internal/config"
    "go.uber.org/goleak"
)

@ -26,17 +27,25 @@ func TestMain(m *testing.M) {
}

type stubCloudCreator struct {
    createCalled bool
    state        state.Infrastructure
    createErr    error
    state       state.Infrastructure
    planCalled  bool
    planErr     error
    applyCalled bool
    applyErr    error
}

func (c *stubCloudCreator) Create(
    _ context.Context,
    _ cloudcmd.CreateOptions,
) (state.Infrastructure, error) {
    c.createCalled = true
    return c.state, c.createErr
func (c *stubCloudCreator) Plan(_ context.Context, _ *config.Config) (bool, error) {
    c.planCalled = true
    return false, c.planErr
}

func (c *stubCloudCreator) Apply(_ context.Context, _ cloudprovider.Provider, _ cloudcmd.RollbackBehavior) (state.Infrastructure, error) {
    c.applyCalled = true
    return c.state, c.applyErr
}

func (c *stubCloudCreator) RestoreWorkspace() error {
    return nil
}

type stubCloudTerminator struct {
@ -10,6 +10,8 @@ import (
    "errors"
    "fmt"
    "io/fs"
    "os"
    "path/filepath"

    "github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
    "github.com/edgelesssys/constellation/v2/cli/internal/state"
@ -76,18 +78,30 @@ func runCreate(cmd *cobra.Command, _ []string) error {
    defer spinner.Stop()

    fileHandler := file.NewHandler(afero.NewOsFs())
    creator := cloudcmd.NewCreator(spinner)
    c := &createCmd{log: log}
    if err := c.flags.parse(cmd.Flags()); err != nil {
        return err
    }
    c.log.Debugf("Using flags: %+v", c.flags)

    applier, removeInstaller, err := cloudcmd.NewApplier(
        cmd.Context(),
        spinner,
        constants.TerraformWorkingDir,
        filepath.Join(constants.UpgradeDir, "create"), // Not used by create
        c.flags.tfLogLevel,
        fileHandler,
    )
    if err != nil {
        return err
    }
    defer removeInstaller()

    fetcher := attestationconfigapi.NewFetcher()
    return c.create(cmd, creator, fileHandler, spinner, fetcher)
    return c.create(cmd, applier, fileHandler, spinner, fetcher)
}

func (c *createCmd) create(cmd *cobra.Command, creator cloudCreator, fileHandler file.Handler, spinner spinnerInterf, fetcher attestationconfigapi.Fetcher) (retErr error) {
func (c *createCmd) create(cmd *cobra.Command, applier cloudApplier, fileHandler file.Handler, spinner spinnerInterf, fetcher attestationconfigapi.Fetcher) (retErr error) {
    if err := c.checkDirClean(fileHandler); err != nil {
        return err
    }
@ -136,8 +150,6 @@ func (c *createCmd) create(cmd *cobra.Command, creator cloudCreator, fileHandler
        cmd.PrintErrln("")
    }

    provider := conf.GetProvider()

    controlPlaneGroup, ok := conf.NodeGroups[constants.DefaultControlPlaneGroupName]
    if !ok {
        return fmt.Errorf("default control-plane node group %q not found in configuration", constants.DefaultControlPlaneGroupName)
@ -176,13 +188,10 @@ func (c *createCmd) create(cmd *cobra.Command, creator cloudCreator, fileHandler
    }

    spinner.Start("Creating", false)
    opts := cloudcmd.CreateOptions{
        Provider:    provider,
        Config:      conf,
        TFLogLevel:  c.flags.tfLogLevel,
        TFWorkspace: constants.TerraformWorkingDir,
    if _, err := applier.Plan(cmd.Context(), conf); err != nil {
        return fmt.Errorf("planning infrastructure creation: %w", err)
    }
    infraState, err := creator.Create(cmd.Context(), opts)
    infraState, err := applier.Apply(cmd.Context(), conf.GetProvider(), cloudcmd.WithRollbackOnError)
    spinner.Stop()
    if err != nil {
        return err
@ -218,10 +227,12 @@ func (c *createCmd) checkDirClean(fileHandler file.Handler) error {
            c.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename),
        )
    }
    c.log.Debugf("Checking Terraform working directory")
    c.log.Debugf("Checking terraform working directory")
    if _, err := fileHandler.Stat(constants.TerraformWorkingDir); !errors.Is(err, fs.ErrNotExist) {
    if clean, err := fileHandler.IsEmpty(constants.TerraformWorkingDir); err != nil && !errors.Is(err, os.ErrNotExist) {
        return fmt.Errorf("checking if terraform working directory is empty: %w", err)
    } else if err == nil && !clean {
        return fmt.Errorf(
            "directory '%s' already exists in working directory, run 'constellation terminate' before creating a new one",
            "directory '%s' already exists and is not empty, run 'constellation terminate' before creating a new one",
            c.flags.pathPrefixer.PrefixPrintablePath(constants.TerraformWorkingDir),
        )
    }
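checkDirClean now treats a missing or empty Terraform working directory as clean. A standard-library sketch of that check, with a hypothetical helper name:

package sketch

import (
    "errors"
    "os"
)

// dirIsClean reports true when the directory is missing or has no entries,
// mirroring the "empty or absent means clean" rule used by checkDirClean.
func dirIsClean(path string) (bool, error) {
    entries, err := os.ReadDir(path)
    if errors.Is(err, os.ErrNotExist) {
        return true, nil
    }
    if err != nil {
        return false, err
    }
    return len(entries) == 0, nil
}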
@ -8,7 +8,6 @@ package cmd

import (
    "bytes"
    "errors"
    "testing"

    "github.com/edgelesssys/constellation/v2/cli/internal/state"
@ -45,7 +44,6 @@ func TestCreate(t *testing.T) {
        return fs
    }
    infraState := state.Infrastructure{ClusterEndpoint: "192.0.2.1"}
    someErr := errors.New("failed")

    testCases := map[string]struct {
        setupFs func(*require.Assertions, cloudprovider.Provider) afero.Fs
@ -125,7 +123,7 @@ func TestCreate(t *testing.T) {
        },
        "create error": {
            setupFs: fsWithDefaultConfigAndState,
            creator: &stubCloudCreator{createErr: someErr},
            creator: &stubCloudCreator{applyErr: assert.AnError},
            provider: cloudprovider.GCP,
            yesFlag:  true,
            wantErr:  true,
@ -163,9 +161,11 @@ func TestCreate(t *testing.T) {
            } else {
                assert.NoError(err)
                if tc.wantAbort {
                    assert.False(tc.creator.createCalled)
                    assert.False(tc.creator.planCalled)
                    assert.False(tc.creator.applyCalled)
                } else {
                    assert.True(tc.creator.createCalled)
                    assert.True(tc.creator.planCalled)
                    assert.True(tc.creator.applyCalled)

                    var gotState state.State
                    expectedState := state.Infrastructure{
@ -277,7 +277,9 @@ func TestInitialize(t *testing.T) {
                getClusterAttestationConfigErr: k8serrors.NewNotFound(schema.GroupResource{}, ""),
            }, nil
        },
        newClusterApplier: func(ctx context.Context) (clusterUpgrader, error) { return stubTerraformUpgrader{}, nil },
        newInfraApplier: func(ctx context.Context) (cloudApplier, func(), error) {
            return stubTerraformUpgrader{}, func() {}, nil
        },
    }

    err := i.apply(cmd, stubAttestationFetcher{}, "test")
@ -10,6 +10,8 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
|
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
|
||||||
@@ -58,7 +60,6 @@ func runUp(cmd *cobra.Command, _ []string) error {
         return err
     }
     defer spinner.Stop()
-    creator := cloudcmd.NewCreator(spinner)

     m := &miniUpCmd{
         log: log,
|
|||||||
if err := m.flags.parse(cmd.Flags()); err != nil {
|
if err := m.flags.parse(cmd.Flags()); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
creator, cleanUp, err := cloudcmd.NewApplier(
|
||||||
|
cmd.Context(),
|
||||||
|
spinner,
|
||||||
|
constants.TerraformWorkingDir,
|
||||||
|
filepath.Join(constants.UpgradeDir, "create"), // Not used by create
|
||||||
|
m.flags.tfLogLevel,
|
||||||
|
m.fileHandler,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer cleanUp()
|
||||||
|
|
||||||
return m.up(cmd, creator, spinner)
|
return m.up(cmd, creator, spinner)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *miniUpCmd) up(cmd *cobra.Command, creator cloudCreator, spinner spinnerInterf) error {
|
func (m *miniUpCmd) up(cmd *cobra.Command, creator cloudApplier, spinner spinnerInterf) error {
|
||||||
if err := m.checkSystemRequirements(cmd.ErrOrStderr()); err != nil {
|
if err := m.checkSystemRequirements(cmd.ErrOrStderr()); err != nil {
|
||||||
return fmt.Errorf("system requirements not met: %w", err)
|
return fmt.Errorf("system requirements not met: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// create config if not passed as flag and set default values
|
if clean, err := m.fileHandler.IsEmpty(constants.TerraformWorkingDir); err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||||
|
return fmt.Errorf("checking if terraform working directory is empty: %w", err)
|
||||||
|
} else if err == nil && !clean {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"directory %q already exists and is not empty, run 'constellation mini down' before creating a new cluster",
|
||||||
|
m.flags.pathPrefixer.PrefixPrintablePath(constants.TerraformWorkingDir),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// create config if not present in directory and set default values
|
||||||
config, err := m.prepareConfig(cmd)
|
config, err := m.prepareConfig(cmd)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("preparing config: %w", err)
|
return fmt.Errorf("preparing config: %w", err)
|
||||||
@ -159,15 +183,12 @@ func (m *miniUpCmd) prepareExistingConfig(cmd *cobra.Command) (*config.Config, e
|
|||||||
}
|
}
|
||||||
|
|
||||||
// createMiniCluster creates a new cluster using the given config.
|
// createMiniCluster creates a new cluster using the given config.
|
||||||
func (m *miniUpCmd) createMiniCluster(ctx context.Context, creator cloudCreator, config *config.Config) error {
|
func (m *miniUpCmd) createMiniCluster(ctx context.Context, creator cloudApplier, config *config.Config) error {
|
||||||
m.log.Debugf("Creating mini cluster")
|
m.log.Debugf("Creating mini cluster")
|
||||||
opts := cloudcmd.CreateOptions{
|
if _, err := creator.Plan(ctx, config); err != nil {
|
||||||
Provider: cloudprovider.QEMU,
|
return err
|
||||||
Config: config,
|
|
||||||
TFWorkspace: constants.TerraformWorkingDir,
|
|
||||||
TFLogLevel: m.flags.tfLogLevel,
|
|
||||||
}
|
}
|
||||||
infraState, err := creator.Create(ctx, opts)
|
infraState, err := creator.Apply(ctx, config.GetProvider(), cloudcmd.WithoutRollbackOnError)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
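With this change, create, mini up, and upgrade all drive the same plan-then-apply flow. A minimal sketch of that flow, based only on the signatures visible in this diff; the function name createInfra, the package name, and the terraform.LogLevel parameter type are illustrative assumptions, not part of the commit:

// Sketch: driving the unified Applier the way the commands above now do (illustrative).
package sketch

import (
    "context"
    "io"
    "path/filepath"

    "github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
    "github.com/edgelesssys/constellation/v2/cli/internal/state"
    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/edgelesssys/constellation/v2/internal/config"
    "github.com/edgelesssys/constellation/v2/internal/constants"
    "github.com/edgelesssys/constellation/v2/internal/file"
)

func createInfra(ctx context.Context, out io.Writer, conf *config.Config, fh file.Handler, logLevel terraform.LogLevel) (state.Infrastructure, error) {
    applier, cleanUp, err := cloudcmd.NewApplier(
        ctx,
        out,
        constants.TerraformWorkingDir,
        filepath.Join(constants.UpgradeDir, "create"), // backup dir; unused on create
        logLevel,
        fh,
    )
    if err != nil {
        return state.Infrastructure{}, err
    }
    defer cleanUp()

    // Plan writes the pending changes to out and reports whether anything would change.
    if _, err := applier.Plan(ctx, conf); err != nil {
        return state.Infrastructure{}, err
    }
    // Apply creates or updates the resources; the caller chooses the rollback behavior.
    return applier.Apply(ctx, conf.GetProvider(), cloudcmd.WithoutRollbackOnError)
}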
@@ -9,13 +9,9 @@ package cmd
 import (
     "context"
     "fmt"
-    "io"
     "time"

-    "github.com/edgelesssys/constellation/v2/cli/internal/state"
-    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
     "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
-    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
     "github.com/edgelesssys/constellation/v2/internal/config"
     "github.com/rogpeppe/go-internal/diff"
     "github.com/spf13/cobra"
@@ -73,9 +69,3 @@ type kubernetesUpgrader interface {
     BackupCRs(ctx context.Context, crds []apiextensionsv1.CustomResourceDefinition, upgradeDir string) error
     BackupCRDs(ctx context.Context, upgradeDir string) ([]apiextensionsv1.CustomResourceDefinition, error)
 }
-
-type clusterUpgrader interface {
-    PlanClusterUpgrade(ctx context.Context, outWriter io.Writer, vars terraform.Variables, csp cloudprovider.Provider) (bool, error)
-    ApplyClusterUpgrade(ctx context.Context, csp cloudprovider.Provider) (state.Infrastructure, error)
-    RestoreClusterWorkspace() error
-}
@@ -12,10 +12,10 @@ import (
     "io"
     "testing"

+    "github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
     "github.com/edgelesssys/constellation/v2/cli/internal/helm"
     "github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
     "github.com/edgelesssys/constellation/v2/cli/internal/state"
-    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
     "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
     "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
     "github.com/edgelesssys/constellation/v2/internal/config"
@@ -53,7 +53,7 @@ func TestUpgradeApply(t *testing.T) {
         kubeUpgrader      *stubKubernetesUpgrader
         fh                func() file.Handler
         fhAssertions      func(require *require.Assertions, assert *assert.Assertions, fh file.Handler)
-        terraformUpgrader clusterUpgrader
+        terraformUpgrader cloudApplier
         wantErr           bool
         customK8sVersion  string
         flags             applyFlags
@@ -265,7 +265,9 @@ func TestUpgradeApply(t *testing.T) {
                 newKubeUpgrader: func(_ io.Writer, _ string, _ debugLog) (kubernetesUpgrader, error) {
                     return tc.kubeUpgrader, nil
                 },
-                newClusterApplier: func(ctx context.Context) (clusterUpgrader, error) { return tc.terraformUpgrader, nil },
+                newInfraApplier: func(ctx context.Context) (cloudApplier, func(), error) {
+                    return tc.terraformUpgrader, func() {}, nil
+                },
             }
             err := upgrader.apply(cmd, stubAttestationFetcher{}, "test")
             if tc.wantErr {
@@ -321,16 +323,6 @@ func (u *stubKubernetesUpgrader) ExtendClusterConfigCertSANs(_ context.Context,
     return nil
 }
-
-// TODO(v2.11): Remove this function after v2.11 is released.
-func (u *stubKubernetesUpgrader) RemoveAttestationConfigHelmManagement(_ context.Context) error {
-    return nil
-}
-
-// TODO(v2.12): Remove this function.
-func (u *stubKubernetesUpgrader) RemoveHelmKeepAnnotation(_ context.Context) error {
-    return nil
-}

 type stubTerraformUpgrader struct {
     terraformDiff    bool
     planTerraformErr error
@@ -338,15 +330,15 @@ type stubTerraformUpgrader struct {
     rollbackWorkspaceErr error
 }

-func (u stubTerraformUpgrader) PlanClusterUpgrade(_ context.Context, _ io.Writer, _ terraform.Variables, _ cloudprovider.Provider) (bool, error) {
+func (u stubTerraformUpgrader) Plan(_ context.Context, _ *config.Config) (bool, error) {
     return u.terraformDiff, u.planTerraformErr
 }

-func (u stubTerraformUpgrader) ApplyClusterUpgrade(_ context.Context, _ cloudprovider.Provider) (state.Infrastructure, error) {
+func (u stubTerraformUpgrader) Apply(_ context.Context, _ cloudprovider.Provider, _ cloudcmd.RollbackBehavior) (state.Infrastructure, error) {
     return state.Infrastructure{}, u.applyTerraformErr
 }

-func (u stubTerraformUpgrader) RestoreClusterWorkspace() error {
+func (u stubTerraformUpgrader) RestoreWorkspace() error {
     return u.rollbackWorkspaceErr
 }

@@ -354,17 +346,17 @@ type mockTerraformUpgrader struct {
     mock.Mock
 }

-func (m *mockTerraformUpgrader) PlanClusterUpgrade(ctx context.Context, w io.Writer, variables terraform.Variables, provider cloudprovider.Provider) (bool, error) {
-    args := m.Called(ctx, w, variables, provider)
+func (m *mockTerraformUpgrader) Plan(ctx context.Context, conf *config.Config) (bool, error) {
+    args := m.Called(ctx, conf)
     return args.Bool(0), args.Error(1)
 }

-func (m *mockTerraformUpgrader) ApplyClusterUpgrade(ctx context.Context, provider cloudprovider.Provider) (state.Infrastructure, error) {
-    args := m.Called(ctx, provider)
+func (m *mockTerraformUpgrader) Apply(ctx context.Context, provider cloudprovider.Provider, rollback cloudcmd.RollbackBehavior) (state.Infrastructure, error) {
+    args := m.Called(ctx, provider, rollback)
     return args.Get(0).(state.Infrastructure), args.Error(1)
 }

-func (m *mockTerraformUpgrader) RestoreClusterWorkspace() error {
+func (m *mockTerraformUpgrader) RestoreWorkspace() error {
     args := m.Called()
     return args.Error(0)
 }
@@ -20,7 +20,6 @@ import (
     "github.com/edgelesssys/constellation/v2/cli/internal/featureset"
     "github.com/edgelesssys/constellation/v2/cli/internal/helm"
     "github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
-    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
     "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
     "github.com/edgelesssys/constellation/v2/internal/api/fetcher"
     "github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
@@ -104,8 +103,9 @@ func runUpgradeCheck(cmd *cobra.Command, _ []string) error {
     upgradeID := generateUpgradeID(upgradeCmdKindCheck)

     upgradeDir := filepath.Join(constants.UpgradeDir, upgradeID)
-    tfClient, err := cloudcmd.NewClusterUpgrader(
+    tfClient, cleanUp, err := cloudcmd.NewApplier(
         cmd.Context(),
+        cmd.OutOrStdout(),
         constants.TerraformWorkingDir,
         upgradeDir,
         flags.tfLogLevel,
@@ -114,6 +114,7 @@ func runUpgradeCheck(cmd *cobra.Command, _ []string) error {
     if err != nil {
         return fmt.Errorf("setting up Terraform upgrader: %w", err)
     }
+    defer cleanUp()

     kubeChecker, err := kubecmd.New(cmd.OutOrStdout(), constants.AdminConfFilename, fileHandler, log)
     if err != nil {
@@ -219,24 +220,17 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco
     // var manualMigrations []terraform.StateMigration
     // for _, migration := range manualMigrations {
     // u.log.Debugf("Adding manual Terraform migration: %s", migration.DisplayName)
-    // u.upgrader.AddManualStateMigration(migration)
+    // u.terraformChecker.AddManualStateMigration(migration)
     // }
-
-    vars, err := cloudcmd.TerraformUpgradeVars(conf)
-    if err != nil {
-        return fmt.Errorf("parsing upgrade variables: %w", err)
-    }
-    u.log.Debugf("Using Terraform variables:\n%v", vars)
-
     cmd.Println("The following Terraform migrations are available with this CLI:")
-    hasDiff, err := u.terraformChecker.PlanClusterUpgrade(cmd.Context(), cmd.OutOrStdout(), vars, conf.GetProvider())
+    hasDiff, err := u.terraformChecker.Plan(cmd.Context(), conf)
     if err != nil {
         return fmt.Errorf("planning terraform migrations: %w", err)
     }
     defer func() {
         // User doesn't expect to see any changes in his workspace after an "upgrade plan",
         // therefore, roll back to the backed up state.
-        if err := u.terraformChecker.RestoreClusterWorkspace(); err != nil {
+        if err := u.terraformChecker.RestoreWorkspace(); err != nil {
             cmd.PrintErrf(
                 "restoring Terraform workspace: %s, restore the Terraform workspace manually from %s ",
                 err,
@@ -727,8 +721,8 @@ type kubernetesChecker interface {
 }

 type terraformChecker interface {
-    PlanClusterUpgrade(ctx context.Context, outWriter io.Writer, vars terraform.Variables, csp cloudprovider.Provider) (bool, error)
-    RestoreClusterWorkspace() error
+    Plan(context.Context, *config.Config) (bool, error)
+    RestoreWorkspace() error
 }

 type versionListFetcher interface {
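The narrowed interface is driven with a plan-then-restore pattern, as the hunk above shows. A minimal sketch of that pattern under stated assumptions: the function name hasPendingMigrations is illustrative, and the code is assumed to sit in the same package as the terraformChecker interface so it can reuse the context, fmt, and config imports already present there.

// Sketch (illustrative, not part of this commit): a read-only migration check
// that always rolls the workspace back after planning.
func hasPendingMigrations(ctx context.Context, checker terraformChecker, conf *config.Config) (hasDiff bool, err error) {
    hasDiff, err = checker.Plan(ctx, conf)
    if err != nil {
        return false, fmt.Errorf("planning terraform migrations: %w", err)
    }
    // A check must not leave planned changes behind, so always restore the workspace;
    // surface the restore error only if planning itself succeeded.
    defer func() {
        if restoreErr := checker.RestoreWorkspace(); restoreErr != nil && err == nil {
            err = restoreErr
        }
    }()
    return hasDiff, nil
}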
@@ -15,7 +15,6 @@ import (
     "strings"
     "testing"

-    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
     "github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
     "github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
     "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
@@ -291,11 +290,11 @@ type stubTerraformChecker struct {
     rollbackErr error
 }

-func (s stubTerraformChecker) PlanClusterUpgrade(_ context.Context, _ io.Writer, _ terraform.Variables, _ cloudprovider.Provider) (bool, error) {
+func (s stubTerraformChecker) Plan(_ context.Context, _ *config.Config) (bool, error) {
     return s.tfDiff, s.planErr
 }

-func (s stubTerraformChecker) RestoreClusterWorkspace() error {
+func (s stubTerraformChecker) RestoreWorkspace() error {
     return s.rollbackErr
 }

@@ -16,6 +16,7 @@ import (
     "errors"
     "fmt"
     "io"
+    "strings"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
@@ -52,8 +53,54 @@ func (r *Runner) Start(ctx context.Context, name, imageName string) error {
     }
     defer docker.Close()

-    containerName := name + "-libvirt"
+    // check for an existing container
+    if containerName, err := r.file.Read(r.nameFile); err == nil {
+        // check if a container with the same name already exists
+        containers, err := docker.ContainerList(ctx, types.ContainerListOptions{
+            Filters: filters.NewArgs(
+                filters.KeyValuePair{
+                    Key:   "name",
+                    Value: fmt.Sprintf("^%s$", containerName),
+                },
+            ),
+            All: true,
+        })
+        if err != nil {
+            return err
+        }
+        if len(containers) > 1 {
+            return fmt.Errorf("more than one container with name %q found", containerName)
+        }
+
+        // if a container with the same name exists,
+        // check if it is using the correct image and if it is running
+        if len(containers) == 1 {
+            // make sure the container we listed is using the correct image
+            imageBase := strings.Split(imageName, ":")[0]
+            if containers[0].Image != imageBase {
+                return fmt.Errorf("existing libvirt container %q is using a different image: expected %q, got %q", containerName, imageBase, containers[0].Image)
+            }
+
+            // container already exists, check if its running
+            if containers[0].State == "running" {
+                // container is up, nothing to do
+                return nil
+            }
+            // container exists but is not running, remove it
+            // so we can start a new one
+            if err := docker.ContainerRemove(ctx, containers[0].ID, types.ContainerRemoveOptions{Force: true}); err != nil {
+                return err
+            }
+        }
+    } else if !errors.Is(err, afero.ErrFileNotFound) {
+        return err
+    }

+    return r.startNewContainer(ctx, docker, name+"-libvirt", imageName)
+}
+
+// startNewContainer starts a new libvirt container using the given image.
+func (r *Runner) startNewContainer(ctx context.Context, docker *docker.Client, containerName, imageName string) error {
     // check if image exists locally, if not pull it
     // this allows us to use a custom image without having to push it to a registry
     images, err := docker.ImageList(ctx, types.ImageListOptions{
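A point worth isolating from the hunk above: Docker's "name" filter matches substrings and regexes, which is why the pattern is anchored with ^ and $. A minimal, self-contained sketch of that lookup, using the same SDK calls as the diff (the function name findContainerByName and the package name are illustrative; types.ContainerListOptions is the API as used in this repository and may be deprecated in newer SDK versions):

// Sketch (illustrative): find a Docker container by exact name.
package sketch

import (
    "context"
    "fmt"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
    docker "github.com/docker/docker/client"
)

func findContainerByName(ctx context.Context, cli *docker.Client, name string) (*types.Container, error) {
    containers, err := cli.ContainerList(ctx, types.ContainerListOptions{
        // Anchor the pattern so "foo-libvirt" does not also match "foo-libvirt-old".
        Filters: filters.NewArgs(filters.KeyValuePair{Key: "name", Value: fmt.Sprintf("^%s$", name)}),
        All:     true, // include stopped containers
    })
    if err != nil {
        return nil, err
    }
    if len(containers) == 0 {
        return nil, nil // no such container
    }
    return &containers[0], nil
}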
@@ -237,3 +237,18 @@ func (h *Handler) CopyFile(src, dst string, opts ...Option) error {
 func (h *Handler) RenameFile(old, new string) error {
     return h.fs.Rename(old, new)
 }
+
+// IsEmpty returns true if the given directory is empty.
+func (h *Handler) IsEmpty(dirName string) (bool, error) {
+    f, err := h.fs.Open(dirName)
+    if err != nil {
+        return false, err
+    }
+    defer f.Close()
+
+    _, err = f.Readdirnames(1)
+    if err == io.EOF {
+        return true, nil
+    }
+    return false, err
+}
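IsEmpty reports an error for a missing directory rather than treating it as empty, so callers that accept "not there yet" have to check for that case themselves, as the mini up change earlier in this commit does with os.ErrNotExist. A minimal usage sketch under that assumption; the helper name workspaceIsClean and the package name are illustrative:

// Sketch (illustrative): treat a missing directory the same as an empty one.
package sketch

import (
    "errors"
    "os"

    "github.com/edgelesssys/constellation/v2/internal/file"
)

func workspaceIsClean(fh file.Handler, dir string) (bool, error) {
    empty, err := fh.IsEmpty(dir)
    if errors.Is(err, os.ErrNotExist) {
        return true, nil // nothing there yet, safe to create
    }
    if err != nil {
        return false, err
    }
    return empty, nil
}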
@@ -595,3 +595,45 @@ func TestRename(t *testing.T) {
         })
     }
 }
+
+func TestIsEmpty(t *testing.T) {
+    testCases := map[string]struct {
+        setupFs     func(fs *afero.Afero, dirName string) error
+        wantIsEmpty bool
+        wantErr     bool
+    }{
+        "empty directory": {
+            setupFs:     func(fs *afero.Afero, dirName string) error { return fs.Mkdir(dirName, 0o755) },
+            wantIsEmpty: true,
+        },
+        "directory not empty": {
+            setupFs: func(fs *afero.Afero, dirName string) error {
+                return fs.WriteFile(filepath.Join(dirName, "file"), []byte("some content"), 0o755)
+            },
+        },
+        "directory not existent": {
+            wantErr: true,
+        },
+    }
+
+    for name, tc := range testCases {
+        t.Run(name, func(t *testing.T) {
+            assert := assert.New(t)
+            require := require.New(t)
+            dirName := "test"
+
+            handler := NewHandler(afero.NewMemMapFs())
+            if tc.setupFs != nil {
+                require.NoError(tc.setupFs(handler.fs, dirName))
+            }
+
+            isEmpty, err := handler.IsEmpty(dirName)
+            if tc.wantErr {
+                assert.Error(err)
+            } else {
+                assert.NoError(err)
+                assert.Equal(tc.wantIsEmpty, isEmpty)
+            }
+        })
+    }
+}