Mirror of https://github.com/edgelesssys/constellation.git, synced 2025-02-25 17:21:24 -05:00

cli: enable constellation apply to create new clusters (#2549)

* Allow creation of Constellation clusters using `apply` command
* Add auto-completion for `--skip-phases` flag
* Deprecate create command
* Replace all doc references to create command with apply

Signed-off-by: Daniel Weiße <dw@edgeless.systems>

This commit is contained in:
parent 82b68df92a
commit 4c8ce55e5a
@@ -138,7 +138,12 @@ runs:
     if: inputs.selfManagedInfra != 'true'
     shell: bash
     run: |
-      constellation create -y --debug --tf-log=DEBUG
+      # TODO(v2.14): Remove workaround for CLIs not supporting apply command
+      cmd='apply --skip-phases="init,attestationconfig,certsans,helm,image,k8s"'
+      if constellation --help | grep -q init; then
+        cmd=create
+      fi
+      constellation $cmd -y --debug --tf-log=DEBUG
   - name: Constellation create (self-managed)
     if: inputs.selfManagedInfra == 'true'
@@ -163,7 +168,7 @@ runs:
     shell: bash
     run: |
       # TODO(v2.14): Remove workaround for CLIs not supporting apply command
-      cmd=apply
+      cmd="apply --skip-phases=infrastructure"
       if constellation --help | grep -q init; then
        cmd=init
       fi
@@ -121,6 +121,11 @@ func (a *Applier) RestoreWorkspace() error {
     return restoreBackup(a.fileHandler, a.workingDir, filepath.Join(a.backupDir, constants.TerraformUpgradeBackupDir))
 }
 
+// WorkingDirIsEmpty returns true if the working directory of the Applier is empty.
+func (a *Applier) WorkingDirIsEmpty() (bool, error) {
+    return a.fileHandler.IsEmpty(a.workingDir)
+}
+
 func (a *Applier) terraformApplyVars(ctx context.Context, conf *config.Config) (terraform.Variables, error) {
     imageRef, err := a.imageFetcher.FetchReference(
         ctx,
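Context worth keeping in mind for later hunks: an empty Terraform workspace is how apply decides between creating a new cluster and upgrading an existing one (see the runTerraformApply changes below). A minimal sketch of that call pattern, with an illustrative interface standing in for the real Applier type:

package sketch

import "fmt"

// applier is an illustrative stand-in for the cloud applier's surface.
type applier interface {
    WorkingDirIsEmpty() (bool, error)
}

// isNewCluster reports whether apply should create rather than upgrade:
// an empty (or missing) Terraform workspace means there is nothing to migrate.
func isNewCluster(a applier) (bool, error) {
    isNew, err := a.WorkingDirIsEmpty()
    if err != nil {
        return false, fmt.Errorf("checking if Terraform workspace is empty: %w", err)
    }
    return isNew, nil
}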
@@ -53,9 +53,9 @@ func plan(
         return false, fmt.Errorf("terraform plan: %w", err)
     }
 
-    // If we are planning in a new workspace, we don't want to show a diff
+    // If we are planning in a new workspace, we don't want to show the plan
     if isNewWorkspace {
-        return false, nil
+        return hasDiff, nil
     }
 
     if hasDiff {
@@ -67,6 +67,7 @@ func plan(
 }
 
 // restoreBackup replaces the existing Terraform workspace with the backup.
+// If no backup exists, this function simply removes workingDir.
 func restoreBackup(fileHandler file.Handler, workingDir, backupDir string) error {
     if err := fileHandler.RemoveAll(workingDir); err != nil {
         return fmt.Errorf("removing existing workspace: %w", err)
@@ -74,7 +75,7 @@ func restoreBackup(fileHandler file.Handler, workingDir, backupDir string) error
     if err := fileHandler.CopyDir(
         backupDir,
         workingDir,
-    ); err != nil {
+    ); err != nil && !errors.Is(err, os.ErrNotExist) { // ignore not found error because backup does not exist for new clusters
         return fmt.Errorf("replacing terraform workspace with backup: %w", err)
     }
 
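The not-found tolerance added to restoreBackup can be reproduced with the standard library alone. A minimal standalone sketch of the same pattern, using plain os calls instead of the project's file.Handler (names are illustrative):

package sketch

import (
    "errors"
    "fmt"
    "os"
)

// restore removes workingDir and, if a backup exists, moves it into place.
// A missing backup is not an error: a freshly created cluster has no
// pre-upgrade state to roll back to, so there is nothing to restore.
func restore(workingDir, backupDir string) error {
    if err := os.RemoveAll(workingDir); err != nil {
        return fmt.Errorf("removing existing workspace: %w", err)
    }
    if err := os.Rename(backupDir, workingDir); err != nil && !errors.Is(err, os.ErrNotExist) {
        return fmt.Errorf("replacing workspace with backup: %w", err)
    }
    return nil
}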
@@ -9,6 +9,7 @@ package cloudcmd
 import (
     "context"
     "io"
+    "os"
     "path/filepath"
     "testing"
 
@@ -123,40 +124,42 @@ func TestTFPlan(t *testing.T) {
 func TestRestoreBackup(t *testing.T) {
     existingWorkspace := "foo"
     backupDir := "bar"
+    testFile := "file"
 
     testCases := map[string]struct {
         prepareFs            func(require *require.Assertions) file.Handler
+        wantRemoveWorkingDir bool
         wantErr              bool
     }{
         "success": {
             prepareFs: func(require *require.Assertions) file.Handler {
                 fs := file.NewHandler(afero.NewMemMapFs())
-                require.NoError(fs.MkdirAll(existingWorkspace))
-                require.NoError(fs.MkdirAll(backupDir))
+                require.NoError(fs.Write(filepath.Join(existingWorkspace, testFile), []byte{}, file.OptMkdirAll))
+                require.NoError(fs.Write(filepath.Join(backupDir, testFile), []byte{}, file.OptMkdirAll))
                 return fs
             },
         },
-        "existing workspace does not exist": {
+        "only backup exists": {
             prepareFs: func(require *require.Assertions) file.Handler {
                 fs := file.NewHandler(afero.NewMemMapFs())
-                require.NoError(fs.MkdirAll(backupDir))
+                require.NoError(fs.Write(filepath.Join(backupDir, testFile), []byte{}, file.OptMkdirAll))
                 return fs
             },
         },
-        "backup dir does not exist": {
+        "only existingWorkspace exists": {
             prepareFs: func(require *require.Assertions) file.Handler {
                 fs := file.NewHandler(afero.NewMemMapFs())
-                require.NoError(fs.MkdirAll(existingWorkspace))
+                require.NoError(fs.Write(filepath.Join(existingWorkspace, testFile), []byte{}, file.OptMkdirAll))
                 return fs
             },
-            wantErr: true,
+            wantRemoveWorkingDir: true,
         },
         "read only file system": {
             prepareFs: func(require *require.Assertions) file.Handler {
                 memFS := afero.NewMemMapFs()
                 fs := file.NewHandler(memFS)
-                require.NoError(fs.MkdirAll(existingWorkspace))
-                require.NoError(fs.MkdirAll(backupDir))
+                require.NoError(fs.Write(filepath.Join(existingWorkspace, testFile), []byte{}, file.OptMkdirAll))
+                require.NoError(fs.Write(filepath.Join(backupDir, testFile), []byte{}, file.OptMkdirAll))
                 return file.NewHandler(afero.NewReadOnlyFs(memFS))
             },
             wantErr: true,
@@ -174,6 +177,14 @@ func TestRestoreBackup(t *testing.T) {
                 return
             }
             assert.NoError(err)
+            _, err = fs.Stat(filepath.Join(backupDir, testFile))
+            assert.ErrorIs(err, os.ErrNotExist)
+            _, err = fs.Stat(filepath.Join(existingWorkspace, testFile))
+            if tc.wantRemoveWorkingDir {
+                assert.ErrorIs(err, os.ErrNotExist)
+            } else {
+                assert.NoError(err)
+            }
         })
     }
 }
@@ -14,7 +14,6 @@ import (
     "io"
     "io/fs"
     "net"
-    "os"
     "path/filepath"
     "slices"
     "strings"
@@ -61,8 +60,8 @@ const (
 )
 
 // allPhases returns a list of all phases that can be skipped as strings.
-func allPhases() []string {
-    return []string{
+func allPhases(except ...skipPhase) []string {
+    phases := []string{
         string(skipInfrastructurePhase),
         string(skipInitPhase),
         string(skipAttestationConfigPhase),
@@ -71,6 +70,14 @@ func allPhases() []string {
         string(skipImagePhase),
         string(skipK8sPhase),
     }
+
+    var returnedPhases []string
+    for idx, phase := range phases {
+        if !slices.Contains(except, skipPhase(phase)) {
+            returnedPhases = append(returnedPhases, phases[idx])
+        }
+    }
+    return returnedPhases
 }
 
 // formatSkipPhases returns a formatted string of all phases that can be skipped.
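A self-contained sketch of the filtering that the new except parameter performs (the phase names match the constants above; the helper and main function are illustrative only):

package main

import (
    "fmt"
    "slices"
)

// filterExcept mirrors what allPhases(except...) does: return every
// phase name except the ones passed in.
func filterExcept(all []string, except ...string) []string {
    var out []string
    for _, p := range all {
        if !slices.Contains(except, p) {
            out = append(out, p)
        }
    }
    return out
}

func main() {
    all := []string{"infrastructure", "init", "attestationconfig", "certsans", "helm", "image", "k8s"}
    fmt.Println(filterExcept(all))         // all seven phases
    fmt.Println(filterExcept(all, "init")) // everything except "init"
}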
@@ -84,10 +91,14 @@ type skipPhase string
 // skipPhases is a list of phases that can be skipped during the upgrade process.
 type skipPhases map[skipPhase]struct{}
 
-// contains returns true if the list of phases contains the given phase.
-func (s skipPhases) contains(phase skipPhase) bool {
-    _, ok := s[skipPhase(strings.ToLower(string(phase)))]
-    return ok
+// contains returns true if skipPhases contains all of the given phases.
+func (s skipPhases) contains(phases ...skipPhase) bool {
+    for _, phase := range phases {
+        if _, ok := s[skipPhase(strings.ToLower(string(phase)))]; !ok {
+            return false
+        }
+    }
+    return true
 }
 
 // add a phase to the list of phases.
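Worth noting: with several arguments, the new contains is an all-of check, not an any-of check; the TestSkipPhases additions near the bottom of this commit assert exactly that. A self-contained sketch of the semantics, simplified to plain strings:

package main

import (
    "fmt"
    "strings"
)

type skipPhases map[string]struct{}

// contains reports whether every given phase is in the set (all-of, not any-of).
func (s skipPhases) contains(phases ...string) bool {
    for _, p := range phases {
        if _, ok := s[strings.ToLower(p)]; !ok {
            return false
        }
    }
    return true
}

func main() {
    s := skipPhases{"attestationconfig": {}, "certsans": {}}
    fmt.Println(s.contains("attestationconfig", "certsans")) // true
    fmt.Println(s.contains("attestationconfig", "init"))     // false: "init" is missing
}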
@@ -122,6 +133,7 @@ func NewApplyCmd() *cobra.Command {
 
     must(cmd.Flags().MarkHidden("timeout"))
 
+    must(cmd.RegisterFlagCompletionFunc("skip-phases", skipPhasesCompletion))
     return cmd
 }
 
@@ -238,7 +250,6 @@ func runApply(cmd *cobra.Command, _ []string) error {
         log:             log,
         spinner:         spinner,
         merger:          &kubeconfigMerger{log: log},
-        quotaChecker:    license.NewClient(),
         newHelmClient:   newHelmClient,
         newDialer:       newDialer,
         newKubeUpgrader: newKubeUpgrader,
@@ -249,7 +260,7 @@ func runApply(cmd *cobra.Command, _ []string) error {
     defer cancel()
     cmd.SetContext(ctx)
 
-    return apply.apply(cmd, attestationconfigapi.NewFetcher(), upgradeDir)
+    return apply.apply(cmd, attestationconfigapi.NewFetcher(), license.NewClient(), upgradeDir)
 }
 
 type applyCmd struct {
@@ -260,7 +271,6 @@ type applyCmd struct {
     spinner spinnerInterf
 
     merger configMerger
-    quotaChecker license.QuotaChecker
 
     newHelmClient func(kubeConfigPath string, log debugLog) (helmApplier, error)
     newDialer     func(validator atls.Validator) *dialer.Dialer
@@ -336,25 +346,24 @@ The control flow is as follows:
 │Write success output│
 └────────────────────┘
 */
-func (a *applyCmd) apply(cmd *cobra.Command, configFetcher attestationconfigapi.Fetcher, upgradeDir string) error {
-    // Migrate state file
-    stateFile, err := state.ReadFromFile(a.fileHandler, constants.StateFilename)
-    if err != nil {
-        return fmt.Errorf("reading state file: %w", err)
-    }
-    if err := stateFile.Migrate(); err != nil {
-        return fmt.Errorf("migrating state file: %w", err)
-    }
-    if err := stateFile.WriteToFile(a.fileHandler, constants.StateFilename); err != nil {
-        return fmt.Errorf("writing state file: %w", err)
-    }
-
+func (a *applyCmd) apply(
+    cmd *cobra.Command, configFetcher attestationconfigapi.Fetcher,
+    quotaChecker license.QuotaChecker, upgradeDir string,
+) error {
     // Validate inputs
     conf, stateFile, err := a.validateInputs(cmd, configFetcher)
     if err != nil {
         return err
     }
 
+    // Check license
+    a.log.Debugf("Running license check")
+    checker := license.NewChecker(quotaChecker, a.fileHandler)
+    if err := checker.CheckLicense(cmd.Context(), conf.GetProvider(), conf.Provider, cmd.Printf); err != nil {
+        cmd.PrintErrf("License check failed: %s", err)
+    }
+    a.log.Debugf("Checked license")
+
     // Now start actually running the apply command
 
     // Check current Terraform state, if it exists and infrastructure upgrades are not skipped,
@@ -375,11 +384,14 @@ func (a *applyCmd) apply(cmd *cobra.Command, configFetcher attestationconfigapi.
     }
 
     // From now on we can assume a valid Kubernetes admin config file exists
+    var kubeUpgrader kubernetesUpgrader
+    if !a.flags.skipPhases.contains(skipAttestationConfigPhase, skipCertSANsPhase, skipHelmPhase, skipK8sPhase, skipImagePhase) {
     a.log.Debugf("Creating Kubernetes client using %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))
-    kubeUpgrader, err := a.newKubeUpgrader(cmd.OutOrStdout(), constants.AdminConfFilename, a.log)
+    kubeUpgrader, err = a.newKubeUpgrader(cmd.OutOrStdout(), constants.AdminConfFilename, a.log)
     if err != nil {
         return err
     }
+    }
 
     // Apply Attestation Config
     if !a.flags.skipPhases.contains(skipAttestationConfigPhase) {
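One subtlety in this hunk: kubeUpgrader is now a declared-but-possibly-nil interface whenever every Kubernetes-touching phase is skipped, so all later uses must stay behind equivalent phase guards. A self-contained illustration of that pattern (all names illustrative):

package main

import "fmt"

type upgrader interface{ Upgrade() error }

type realUpgrader struct{}

func (realUpgrader) Upgrade() error { return nil }

func main() {
    needsK8s := false // e.g. every Kubernetes-touching phase is skipped

    var u upgrader // stays nil unless some phase needs it
    if needsK8s {
        u = realUpgrader{}
    }

    // Later uses must sit behind the same guard: calling a method
    // on the nil interface would panic at runtime.
    if needsK8s {
        fmt.Println(u.Upgrade())
    }
}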
@@ -405,9 +417,7 @@ func (a *applyCmd) apply(cmd *cobra.Command, configFetcher attestationconfigapi.
     }
 
     // Upgrade NodeVersion object
-    // This can be skipped if we ran the init RPC, as the NodeVersion object is already up to date
-    if !(a.flags.skipPhases.contains(skipK8sPhase) && a.flags.skipPhases.contains(skipImagePhase)) &&
-        a.flags.skipPhases.contains(skipInitPhase) {
+    if !(a.flags.skipPhases.contains(skipK8sPhase, skipImagePhase)) {
         if err := a.runK8sUpgrade(cmd, conf, kubeUpgrader); err != nil {
             return err
         }
@@ -431,29 +441,70 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
         return nil, nil, err
     }
 
-    // Check license
-    a.log.Debugf("Running license check")
-    checker := license.NewChecker(a.quotaChecker, a.fileHandler)
-    if err := checker.CheckLicense(cmd.Context(), conf.GetProvider(), conf.Provider, cmd.Printf); err != nil {
-        cmd.PrintErrf("License check failed: %v", err)
-    }
-    a.log.Debugf("Checked license")
-
-    // Check if we already have a running Kubernetes cluster
-    // by checking if the Kubernetes admin config file exists
-    // If it exist, we skip the init phase
-    // If it does not exist, we need to run the init RPC first
-    // This may break things further down the line
-    // It is the user's responsibility to make sure the cluster is in a valid state
-    a.log.Debugf("Checking if %s exists", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))
-    if _, err := a.fileHandler.Stat(constants.AdminConfFilename); err == nil {
+    a.log.Debugf("Reading state file from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))
+    stateFile, err := state.CreateOrRead(a.fileHandler, constants.StateFilename)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    // Validate the state file and set flags accordingly
+    //
+    // We don't run "hard" verification of skip-phases flags and state file here,
+    // a user may still end up skipping phases that could result in errors later on.
+    // However, we perform basic steps, like ensuring init phase is not skipped if
+    a.log.Debugf("Validating state file")
+    preCreateValidateErr := stateFile.Validate(state.PreCreate, conf.GetProvider())
+    preInitValidateErr := stateFile.Validate(state.PreInit, conf.GetProvider())
+    postInitValidateErr := stateFile.Validate(state.PostInit, conf.GetProvider())
+
+    // If the state file is in a pre-create state, we need to create the cluster,
+    // in which case the workspace has to be clean
+    if preCreateValidateErr == nil {
+        // We can't skip the infrastructure phase if no infrastructure has been defined
+        a.log.Debugf("State file is in pre-create state, checking workspace")
+        if a.flags.skipPhases.contains(skipInfrastructurePhase) {
+            return nil, nil, preInitValidateErr
+        }
+
+        if err := a.checkCreateFilesClean(); err != nil {
+            return nil, nil, err
+        }
+
+        a.log.Debugf("No Terraform state found in current working directory. Preparing to create a new cluster.")
+        printCreateWarnings(cmd.ErrOrStderr(), conf)
+    }
+
+    // Check if the state file is in a pre-init OR
+    // if in pre-create state and init should not be skipped
+    // If so, we need to run the init RPC
+    if preInitValidateErr == nil || (preCreateValidateErr == nil && !a.flags.skipPhases.contains(skipInitPhase)) {
+        // We can't skip the init phase if the init RPC hasn't been run yet
+        a.log.Debugf("State file is in pre-init state, checking workspace")
+        if a.flags.skipPhases.contains(skipInitPhase) {
+            return nil, nil, postInitValidateErr
+        }
+
+        if err := a.checkInitFilesClean(); err != nil {
+            return nil, nil, err
+        }
+
+        // Skip image and k8s phase, since they are covered by the init RPC
+        a.flags.skipPhases.add(skipImagePhase, skipK8sPhase)
+    }
+
+    // If the state file is in a post-init state,
+    // we need to make sure specific files exist in the workspace
+    if postInitValidateErr == nil {
+        a.log.Debugf("State file is in post-init state, checking workspace")
+        if err := a.checkPostInitFilesExist(); err != nil {
+            return nil, nil, err
+        }
+
+        // Skip init phase, since the init RPC has already been run
         a.flags.skipPhases.add(skipInitPhase)
-    } else if !errors.Is(err, os.ErrNotExist) {
-        return nil, nil, fmt.Errorf("checking for %s: %w", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename), err)
+    } else if preCreateValidateErr != nil && preInitValidateErr != nil {
+        return nil, nil, postInitValidateErr
     }
-    a.log.Debugf("Init RPC required: %t", !a.flags.skipPhases.contains(skipInitPhase))
-
-    // Validate input arguments
 
     // Validate Kubernetes version as set in the user's config
     // If we need to run the init RPC, the version has to be valid
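The three Validate calls above classify the workspace as pre-create, pre-init, or post-init, and the skip set is then widened accordingly. A simplified, self-contained model of that decision logic (phase strings as in the diff; the function and error text are illustrative, and the infrastructure-skip error case is omitted):

package main

import "fmt"

type clusterState int

const (
    preCreate clusterState = iota // no infrastructure yet
    preInit                       // infrastructure exists, init RPC not yet run
    postInit                      // cluster fully initialized
)

// impliedSkips models how validateInputs extends the user's skip set.
func impliedSkips(s clusterState, userSkipsInit bool) ([]string, error) {
    switch s {
    case preCreate:
        if userSkipsInit {
            return nil, nil // infrastructure-only run, nothing extra to skip
        }
        return []string{"image", "k8s"}, nil // covered by the init RPC
    case preInit:
        if userSkipsInit {
            return nil, fmt.Errorf("cannot skip init: the init RPC has not been run yet")
        }
        return []string{"image", "k8s"}, nil
    default: // postInit
        return []string{"init"}, nil // the init RPC has already been run
    }
}

func main() {
    fmt.Println(impliedSkips(preCreate, false)) // [image k8s] <nil>
    fmt.Println(impliedSkips(postInit, false))  // [init] <nil>
}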
@@ -461,11 +512,13 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
     // We skip version validation if the user explicitly skips the Kubernetes phase
     a.log.Debugf("Validating Kubernetes version %s", conf.KubernetesVersion)
     validVersion, err := versions.NewValidK8sVersion(string(conf.KubernetesVersion), true)
-    if err != nil && !a.flags.skipPhases.contains(skipK8sPhase) {
+    if err != nil {
         a.log.Debugf("Kubernetes version not valid: %s", err)
         if !a.flags.skipPhases.contains(skipInitPhase) {
             return nil, nil, err
         }
+
+        if !a.flags.skipPhases.contains(skipK8sPhase) {
         a.log.Debugf("Checking if user wants to continue anyway")
         if !a.flags.yes {
             confirmed, err := askToConfirm(cmd,
@@ -484,6 +537,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
             a.flags.skipPhases.add(skipK8sPhase)
             a.log.Debugf("Outdated Kubernetes version accepted, Kubernetes upgrade will be skipped")
         }
+        }
     if versions.IsPreviewK8sVersion(validVersion) {
         cmd.PrintErrf("Warning: Constellation with Kubernetes %s is still in preview. Use only for evaluation purposes.\n", validVersion)
     }
@@ -492,29 +546,19 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
 
     // Validate microservice version (helm versions) in the user's config matches the version of the CLI
     // This makes sure we catch potential errors early, not just after we already ran Terraform migrations or the init RPC
-    if !a.flags.force && !a.flags.skipPhases.contains(skipHelmPhase) && !a.flags.skipPhases.contains(skipInitPhase) {
+    if !a.flags.force && !a.flags.skipPhases.contains(skipHelmPhase, skipInitPhase) {
         if err := validateCLIandConstellationVersionAreEqual(constants.BinaryVersion(), conf.Image, conf.MicroserviceVersion); err != nil {
             return nil, nil, err
         }
     }
 
-    // Constellation on QEMU or OpenStack don't support upgrades
-    // If using one of those providers, make sure the command is only used to initialize a cluster
-    if !(conf.GetProvider() == cloudprovider.AWS || conf.GetProvider() == cloudprovider.Azure || conf.GetProvider() == cloudprovider.GCP) {
-        if a.flags.skipPhases.contains(skipInitPhase) {
-            return nil, nil, fmt.Errorf("upgrades are not supported for provider %s", conf.GetProvider())
-        }
-        // Skip Terraform phase
-        a.log.Debugf("Skipping Infrastructure upgrade")
-        a.flags.skipPhases.add(skipInfrastructurePhase)
-    }
-
-    // Check if Terraform state exists
-    if tfStateExists, err := a.tfStateExists(); err != nil {
-        return nil, nil, fmt.Errorf("checking Terraform state: %w", err)
-    } else if !tfStateExists {
-        a.flags.skipPhases.add(skipInfrastructurePhase)
-        a.log.Debugf("No Terraform state found in current working directory. Assuming self-managed infrastructure. Infrastructure upgrades will not be performed.")
+    // Constellation does not support image upgrades on all CSPs. Not supported are: QEMU, OpenStack
+    // If using one of those providers, print a warning when trying to upgrade the image
+    if !(conf.GetProvider() == cloudprovider.AWS || conf.GetProvider() == cloudprovider.Azure || conf.GetProvider() == cloudprovider.GCP) &&
+        !a.flags.skipPhases.contains(skipImagePhase) {
+        cmd.PrintErrf("Image upgrades are not supported for provider %s\n", conf.GetProvider())
+        cmd.PrintErrln("Image phase will be skipped")
+        a.flags.skipPhases.add(skipImagePhase)
     }
 
     // Print warning about AWS attestation
@@ -523,31 +567,10 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
         cmd.PrintErrln("WARNING: Attestation temporarily relies on AWS nitroTPM. See https://docs.edgeless.systems/constellation/workflows/config#choosing-a-vm-type for more information.")
     }
 
-    // Read and validate state file
-    // This needs to be done as a last step, as we need to parse all other inputs to
-    // know which phases are skipped.
-    a.log.Debugf("Reading state file from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))
-    stateFile, err := state.ReadFromFile(a.fileHandler, constants.StateFilename)
-    if err != nil {
-        return nil, nil, err
-    }
-    if a.flags.skipPhases.contains(skipInitPhase) {
-        // If the skipInit flag is set, we are in a state where the cluster
-        // has already been initialized and check against the respective constraints.
-        if err := stateFile.Validate(state.PostInit, conf.GetProvider()); err != nil {
-            return nil, nil, err
-        }
-    } else {
-        // The cluster has not been initialized yet, so we check against the pre-init constraints.
-        if err := stateFile.Validate(state.PreInit, conf.GetProvider()); err != nil {
-            return nil, nil, err
-        }
-    }
-
     return conf, stateFile, nil
 }
 
-// applyJoincConfig creates or updates the cluster's join config.
+// applyJoinConfig creates or updates the cluster's join config.
 // If the config already exists, and is different from the new config, the user is asked to confirm the upgrade.
 func (a *applyCmd) applyJoinConfig(
     cmd *cobra.Command, kubeUpgrader kubernetesUpgrader, newConfig config.AttestationCfg, measurementSalt []byte,
@@ -619,14 +642,121 @@ func (a *applyCmd) runK8sUpgrade(cmd *cobra.Command, conf *config.Config, kubeUp
     return nil
 }
 
-// tfStateExists checks whether a Constellation Terraform state exists in the current working directory.
-func (a *applyCmd) tfStateExists() (bool, error) {
-    _, err := a.fileHandler.Stat(constants.TerraformWorkingDir)
-    if err != nil {
-        if errors.Is(err, fs.ErrNotExist) {
-            return false, nil
-        }
-        return false, fmt.Errorf("reading Terraform state: %w", err)
-    }
-    return true, nil
+// checkCreateFilesClean ensures that the workspace is clean before creating a new cluster.
+func (a *applyCmd) checkCreateFilesClean() error {
+    if err := a.checkInitFilesClean(); err != nil {
+        return err
+    }
+    a.log.Debugf("Checking Terraform state")
+    if _, err := a.fileHandler.Stat(constants.TerraformWorkingDir); err == nil {
+        return fmt.Errorf(
+            "terraform state %q already exists in working directory, run 'constellation terminate' before creating a new cluster",
+            a.flags.pathPrefixer.PrefixPrintablePath(constants.TerraformWorkingDir),
+        )
+    } else if !errors.Is(err, fs.ErrNotExist) {
+        return fmt.Errorf("checking for %s: %w", a.flags.pathPrefixer.PrefixPrintablePath(constants.TerraformWorkingDir), err)
+    }
+
+    return nil
+}
+
+// checkInitFilesClean ensures that the workspace is clean before running the init RPC.
+func (a *applyCmd) checkInitFilesClean() error {
+    a.log.Debugf("Checking admin configuration file")
+    if _, err := a.fileHandler.Stat(constants.AdminConfFilename); err == nil {
+        return fmt.Errorf(
+            "file %q already exists in working directory, run 'constellation terminate' before creating a new cluster",
+            a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename),
+        )
+    } else if !errors.Is(err, fs.ErrNotExist) {
+        return fmt.Errorf("checking for %q: %w", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename), err)
+    }
+    a.log.Debugf("Checking master secrets file")
+    if _, err := a.fileHandler.Stat(constants.MasterSecretFilename); err == nil {
+        return fmt.Errorf(
+            "file %q already exists in working directory. Constellation won't overwrite previous master secrets. Move it somewhere or delete it before creating a new cluster",
+            a.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename),
+        )
+    } else if !errors.Is(err, fs.ErrNotExist) {
+        return fmt.Errorf("checking for %q: %w", a.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename), err)
+    }
+
+    return nil
+}
+
+// checkPostInitFilesExist ensures that the workspace contains the files from a previous init RPC.
+func (a *applyCmd) checkPostInitFilesExist() error {
+    if _, err := a.fileHandler.Stat(constants.AdminConfFilename); err != nil {
+        return fmt.Errorf("checking for %q: %w", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename), err)
+    }
+    if _, err := a.fileHandler.Stat(constants.MasterSecretFilename); err != nil {
+        return fmt.Errorf("checking for %q: %w", a.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename), err)
+    }
+    return nil
+}
+
+func printCreateWarnings(out io.Writer, conf *config.Config) {
+    var printedAWarning bool
+    if !conf.IsReleaseImage() {
+        fmt.Fprintln(out, "Configured image doesn't look like a released production image. Double check image before deploying to production.")
+        printedAWarning = true
+    }
+
+    if conf.IsNamedLikeDebugImage() && !conf.IsDebugCluster() {
+        fmt.Fprintln(out, "WARNING: A debug image is used but debugCluster is false.")
+        printedAWarning = true
+    }
+
+    if conf.IsDebugCluster() {
+        fmt.Fprintln(out, "WARNING: Creating a debug cluster. This cluster is not secure and should only be used for debugging purposes.")
+        fmt.Fprintln(out, "DO NOT USE THIS CLUSTER IN PRODUCTION.")
+        printedAWarning = true
+    }
+
+    if conf.GetAttestationConfig().GetVariant().Equal(variant.AzureTrustedLaunch{}) {
+        fmt.Fprintln(out, "Disabling Confidential VMs is insecure. Use only for evaluation purposes.")
+        printedAWarning = true
+    }
+
+    // Print an extra new line later to separate warnings from the prompt message of the create command
+    if printedAWarning {
+        fmt.Fprintln(out, "")
+    }
+}
+
+// skipPhasesCompletion returns suggestions for the skip-phases flag.
+// We suggest completion for all phases that can be skipped.
+// The phases may be given in any order, as a comma-separated list.
+// For example, "skip-phases helm,init" should suggest all phases but "helm" and "init".
+func skipPhasesCompletion(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+    skippedPhases := strings.Split(toComplete, ",")
+    if skippedPhases[0] == "" {
+        // No phases were typed yet, so suggest all phases
+        return allPhases(), cobra.ShellCompDirectiveNoFileComp
+    }
+
+    // Determine what phases have already been typed by the user
+    phases := make(map[string]struct{})
+    for _, phase := range allPhases() {
+        phases[phase] = struct{}{}
+    }
+    for _, phase := range skippedPhases {
+        delete(phases, phase)
+    }
+
+    // Get the last phase typed by the user
+    // This is the phase we want to complete
+    lastPhase := skippedPhases[len(skippedPhases)-1]
+    fullyTypedPhases := strings.TrimSuffix(toComplete, lastPhase)
+
+    // Add all phases that have not been typed yet to the suggestions
+    // The suggestion is the fully typed phases + the phase that is being completed
+    var suggestions []string
+    for phase := range phases {
+        if strings.HasPrefix(phase, lastPhase) {
+            suggestions = append(suggestions, fmt.Sprintf("%s%s", fullyTypedPhases, phase))
+        }
+    }
+
+    return suggestions, cobra.ShellCompDirectiveNoFileComp
 }
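The completion logic added above matches the new TestSkipPhasesCompletion further down: strip the trailing partial phase, drop already-typed phases from the candidates, and re-prefix each match. A self-contained sketch of just the string handling, without cobra:

package main

import (
    "fmt"
    "strings"
)

// complete reproduces the suggestion logic of skipPhasesCompletion:
// given a comma-separated, partially typed list, it suggests completions
// for the last element from the phases not yet used.
func complete(all []string, toComplete string) []string {
    typed := strings.Split(toComplete, ",")
    if typed[0] == "" {
        return all // nothing typed yet: suggest every phase
    }

    remaining := make(map[string]struct{})
    for _, p := range all {
        remaining[p] = struct{}{}
    }
    for _, p := range typed {
        delete(remaining, p)
    }

    last := typed[len(typed)-1]
    prefix := strings.TrimSuffix(toComplete, last)

    var suggestions []string
    for p := range remaining {
        if strings.HasPrefix(p, last) {
            suggestions = append(suggestions, prefix+p)
        }
    }
    return suggestions
}

func main() {
    all := []string{"infrastructure", "init", "attestationconfig", "certsans", "helm", "image", "k8s"}
    fmt.Println(complete(all, "helm,ima")) // [helm,image]
}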
@@ -7,15 +7,23 @@ SPDX-License-Identifier: AGPL-3.0-only
 package cmd
 
 import (
+    "bytes"
     "context"
+    "errors"
     "fmt"
+    "path/filepath"
     "strings"
     "testing"
     "time"
 
     "github.com/edgelesssys/constellation/v2/cli/internal/helm"
     "github.com/edgelesssys/constellation/v2/cli/internal/state"
+    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
+    "github.com/edgelesssys/constellation/v2/internal/cloud/gcpshared"
+    "github.com/edgelesssys/constellation/v2/internal/config"
+    "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/edgelesssys/constellation/v2/internal/file"
+    "github.com/edgelesssys/constellation/v2/internal/kms/uri"
     "github.com/edgelesssys/constellation/v2/internal/logger"
     "github.com/spf13/afero"
     "github.com/spf13/pflag"
@@ -24,8 +32,8 @@ import (
 )
 
 // defaultStateFile returns a valid default state for testing.
-func defaultStateFile() *state.State {
-    return &state.State{
+func defaultStateFile(csp cloudprovider.Provider) *state.State {
+    stateFile := &state.State{
         Version: "v1",
         Infrastructure: state.Infrastructure{
             UID: "123",
@@ -57,18 +65,16 @@ func defaultStateFile() *state.State {
             MeasurementSalt: []byte{0x41},
         },
     }
-}
-
-func defaultAzureStateFile() *state.State {
-    s := defaultStateFile()
-    s.Infrastructure.GCP = nil
-    return s
-}
-
-func defaultGCPStateFile() *state.State {
-    s := defaultStateFile()
-    s.Infrastructure.Azure = nil
-    return s
+    switch csp {
+    case cloudprovider.GCP:
+        stateFile.Infrastructure.Azure = nil
+    case cloudprovider.Azure:
+        stateFile.Infrastructure.GCP = nil
+    default:
+        stateFile.Infrastructure.Azure = nil
+        stateFile.Infrastructure.GCP = nil
+    }
+    return stateFile
 }
 
 func TestParseApplyFlags(t *testing.T) {
@@ -102,7 +108,7 @@ func TestParseApplyFlags(t *testing.T) {
                 return flags
             }(),
             wantFlags: applyFlags{
-                skipPhases:     skipPhases{skipHelmPhase: struct{}{}, skipK8sPhase: struct{}{}},
+                skipPhases:     newPhases(skipHelmPhase, skipK8sPhase),
                 helmWaitMode:   helm.WaitModeAtomic,
                 upgradeTimeout: 5 * time.Minute,
             },
@@ -202,6 +208,7 @@ func TestBackupHelmCharts(t *testing.T) {
 
 func TestSkipPhases(t *testing.T) {
     require := require.New(t)
+    assert := assert.New(t)
     cmd := NewApplyCmd()
     // register persistent flags manually
     cmd.Flags().String("workspace", "", "")
@@ -210,11 +217,273 @@ func TestSkipPhases(t *testing.T) {
     cmd.Flags().Bool("debug", false, "")
 
     require.NoError(cmd.Flags().Set("skip-phases", strings.Join(allPhases(), ",")))
-    wantPhases := skipPhases{}
-    wantPhases.add(skipInfrastructurePhase, skipInitPhase, skipAttestationConfigPhase, skipCertSANsPhase, skipHelmPhase, skipK8sPhase, skipImagePhase)
+    wantPhases := newPhases(skipInfrastructurePhase, skipInitPhase, skipAttestationConfigPhase, skipCertSANsPhase, skipHelmPhase, skipK8sPhase, skipImagePhase)
 
     var flags applyFlags
     err := flags.parse(cmd.Flags())
     require.NoError(err)
-    assert.Equal(t, wantPhases, flags.skipPhases)
+    assert.Equal(wantPhases, flags.skipPhases)
+
+    phases := newPhases(skipAttestationConfigPhase, skipCertSANsPhase)
+    assert.True(phases.contains(skipAttestationConfigPhase, skipCertSANsPhase))
+    assert.False(phases.contains(skipAttestationConfigPhase, skipInitPhase))
+    assert.False(phases.contains(skipInitPhase, skipInfrastructurePhase))
+}
+
+func TestValidateInputs(t *testing.T) {
+    defaultConfig := func(csp cloudprovider.Provider) func(require *require.Assertions, fh file.Handler) {
+        return func(require *require.Assertions, fh file.Handler) {
+            cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), csp)
+
+            if csp == cloudprovider.GCP {
+                require.NoError(fh.WriteJSON("saKey.json", &gcpshared.ServiceAccountKey{
+                    Type:                    "service_account",
+                    ProjectID:               "project_id",
+                    PrivateKeyID:            "key_id",
+                    PrivateKey:              "key",
+                    ClientEmail:             "client_email",
+                    ClientID:                "client_id",
+                    AuthURI:                 "auth_uri",
+                    TokenURI:                "token_uri",
+                    AuthProviderX509CertURL: "cert",
+                    ClientX509CertURL:       "client_cert",
+                }))
+                cfg.Provider.GCP.ServiceAccountKeyPath = "saKey.json"
+            }
+
+            require.NoError(fh.WriteYAML(constants.ConfigFilename, cfg))
+        }
+    }
+    preInitState := func(csp cloudprovider.Provider) func(require *require.Assertions, fh file.Handler) {
+        return func(require *require.Assertions, fh file.Handler) {
+            stateFile := defaultStateFile(csp)
+            stateFile.ClusterValues = state.ClusterValues{}
+            require.NoError(fh.WriteYAML(constants.StateFilename, stateFile))
+        }
+    }
+    postInitState := func(csp cloudprovider.Provider) func(require *require.Assertions, fh file.Handler) {
+        return func(require *require.Assertions, fh file.Handler) {
+            require.NoError(fh.WriteYAML(constants.StateFilename, defaultStateFile(csp)))
+        }
+    }
+    defaultMasterSecret := func(require *require.Assertions, fh file.Handler) {
+        require.NoError(fh.WriteJSON(constants.MasterSecretFilename, &uri.MasterSecret{}))
+    }
+    defaultAdminConfig := func(require *require.Assertions, fh file.Handler) {
+        require.NoError(fh.Write(constants.AdminConfFilename, []byte("admin config")))
+    }
+    defaultTfState := func(require *require.Assertions, fh file.Handler) {
+        require.NoError(fh.Write(filepath.Join(constants.TerraformWorkingDir, "tfvars"), []byte("tf state")))
+    }
+
+    testCases := map[string]struct {
+        createConfig       func(require *require.Assertions, fh file.Handler)
+        createState        func(require *require.Assertions, fh file.Handler)
+        createMasterSecret func(require *require.Assertions, fh file.Handler)
+        createAdminConfig  func(require *require.Assertions, fh file.Handler)
+        createTfState      func(require *require.Assertions, fh file.Handler)
+        stdin              string
+        flags              applyFlags
+        wantPhases         skipPhases
+        wantErr            bool
+    }{
+        "[upgrade] gcp: all files exist": {
+            createConfig:       defaultConfig(cloudprovider.GCP),
+            createState:        postInitState(cloudprovider.GCP),
+            createMasterSecret: defaultMasterSecret,
+            createAdminConfig:  defaultAdminConfig,
+            createTfState:      defaultTfState,
+            flags:              applyFlags{},
+            wantPhases:         newPhases(skipInitPhase),
+        },
+        "[upgrade] aws: all files exist": {
+            createConfig:       defaultConfig(cloudprovider.AWS),
+            createState:        postInitState(cloudprovider.AWS),
+            createMasterSecret: defaultMasterSecret,
+            createAdminConfig:  defaultAdminConfig,
+            createTfState:      defaultTfState,
+            flags:              applyFlags{},
+            wantPhases:         newPhases(skipInitPhase),
+        },
+        "[upgrade] azure: all files exist": {
+            createConfig:       defaultConfig(cloudprovider.Azure),
+            createState:        postInitState(cloudprovider.Azure),
+            createMasterSecret: defaultMasterSecret,
+            createAdminConfig:  defaultAdminConfig,
+            createTfState:      defaultTfState,
+            flags:              applyFlags{},
+            wantPhases:         newPhases(skipInitPhase),
+        },
+        "[upgrade] qemu: all files exist": {
+            createConfig:       defaultConfig(cloudprovider.QEMU),
+            createState:        postInitState(cloudprovider.QEMU),
+            createMasterSecret: defaultMasterSecret,
+            createAdminConfig:  defaultAdminConfig,
+            createTfState:      defaultTfState,
+            flags:              applyFlags{},
+            wantPhases:         newPhases(skipInitPhase, skipImagePhase), // No image upgrades on QEMU
+        },
+        "no config file errors": {
+            createConfig:       func(require *require.Assertions, fh file.Handler) {},
+            createState:        postInitState(cloudprovider.GCP),
+            createMasterSecret: defaultMasterSecret,
+            createAdminConfig:  defaultAdminConfig,
+            createTfState:      defaultTfState,
+            flags:              applyFlags{},
+            wantErr:            true,
+        },
+        "[init] no admin config file, but mastersecret file exists errors": {
+            createConfig:       defaultConfig(cloudprovider.GCP),
+            createState:        preInitState(cloudprovider.GCP),
+            createMasterSecret: defaultMasterSecret,
+            createAdminConfig:  func(require *require.Assertions, fh file.Handler) {},
+            createTfState:      defaultTfState,
+            flags:              applyFlags{},
+            wantErr:            true,
+        },
+        "[init] no admin config file, no master secret": {
+            createConfig:       defaultConfig(cloudprovider.GCP),
+            createState:        preInitState(cloudprovider.GCP),
+            createMasterSecret: func(require *require.Assertions, fh file.Handler) {},
+            createAdminConfig:  func(require *require.Assertions, fh file.Handler) {},
+            createTfState:      defaultTfState,
+            flags:              applyFlags{},
+            wantPhases:         newPhases(skipImagePhase, skipK8sPhase),
+        },
+        "[create] no tf state, but admin config exists errors": {
+            createConfig:       defaultConfig(cloudprovider.GCP),
+            createState:        preInitState(cloudprovider.GCP),
+            createMasterSecret: defaultMasterSecret,
+            createAdminConfig:  defaultAdminConfig,
+            createTfState:      func(require *require.Assertions, fh file.Handler) {},
+            flags:              applyFlags{},
+            wantErr:            true,
+        },
+        "[create] only config, skip everything but infrastructure": {
+            createConfig:       defaultConfig(cloudprovider.GCP),
+            createState:        func(require *require.Assertions, fh file.Handler) {},
+            createMasterSecret: func(require *require.Assertions, fh file.Handler) {},
+            createAdminConfig:  func(require *require.Assertions, fh file.Handler) {},
+            createTfState:      func(require *require.Assertions, fh file.Handler) {},
+            flags: applyFlags{
+                skipPhases: newPhases(skipInitPhase, skipAttestationConfigPhase, skipCertSANsPhase, skipHelmPhase, skipK8sPhase, skipImagePhase),
+            },
+            wantPhases: newPhases(skipInitPhase, skipAttestationConfigPhase, skipCertSANsPhase, skipHelmPhase, skipK8sPhase, skipImagePhase),
+        },
+        "[create + init] only config file": {
+            createConfig:       defaultConfig(cloudprovider.GCP),
+            createState:        func(require *require.Assertions, fh file.Handler) {},
+            createMasterSecret: func(require *require.Assertions, fh file.Handler) {},
+            createAdminConfig:  func(require *require.Assertions, fh file.Handler) {},
+            createTfState:      func(require *require.Assertions, fh file.Handler) {},
+            flags:              applyFlags{},
+            wantPhases:         newPhases(skipImagePhase, skipK8sPhase),
+        },
+        "[init] self-managed: config and state file exist, skip-phases=infrastructure": {
+            createConfig:       defaultConfig(cloudprovider.GCP),
+            createState:        preInitState(cloudprovider.GCP),
+            createMasterSecret: func(require *require.Assertions, fh file.Handler) {},
+            createAdminConfig:  func(require *require.Assertions, fh file.Handler) {},
+            createTfState:      func(require *require.Assertions, fh file.Handler) {},
+            flags: applyFlags{
+                skipPhases: newPhases(skipInfrastructurePhase),
+            },
+            wantPhases: newPhases(skipInfrastructurePhase, skipImagePhase, skipK8sPhase),
+        },
+    }
+
+    for name, tc := range testCases {
+        t.Run(name, func(t *testing.T) {
+            assert := assert.New(t)
+            require := require.New(t)
+
+            fileHandler := file.NewHandler(afero.NewMemMapFs())
+            tc.createConfig(require, fileHandler)
+            tc.createState(require, fileHandler)
+            tc.createMasterSecret(require, fileHandler)
+            tc.createAdminConfig(require, fileHandler)
+            tc.createTfState(require, fileHandler)
+
+            cmd := NewApplyCmd()
+            var out bytes.Buffer
+            cmd.SetOut(&out)
+            var errOut bytes.Buffer
+            cmd.SetErr(&errOut)
+            cmd.SetIn(bytes.NewBufferString(tc.stdin))
+
+            a := applyCmd{
+                log:         logger.NewTest(t),
+                fileHandler: fileHandler,
+                flags:       tc.flags,
+            }
+
+            _, _, err := a.validateInputs(cmd, &stubAttestationFetcher{})
+            if tc.wantErr {
+                assert.Error(err)
+                return
+            }
+            assert.NoError(err)
+            var cfgErr *config.ValidationError
+            if errors.As(err, &cfgErr) {
+                t.Log(cfgErr.LongMessage())
+            }
+            assert.Equal(tc.wantPhases, a.flags.skipPhases)
+        })
+    }
+}
+
+func TestSkipPhasesCompletion(t *testing.T) {
+    testCases := map[string]struct {
+        toComplete      string
+        wantSuggestions []string
+    }{
+        "empty": {
+            toComplete:      "",
+            wantSuggestions: allPhases(),
+        },
+        "partial": {
+            toComplete:      "hel",
+            wantSuggestions: []string{string(skipHelmPhase)},
+        },
+        "one full word": {
+            toComplete: string(skipHelmPhase),
+        },
+        "one full word with comma": {
+            toComplete: string(skipHelmPhase) + ",",
+            wantSuggestions: func() []string {
+                allPhases := allPhases()
+                var suggestions []string
+                for _, phase := range allPhases {
+                    if phase == string(skipHelmPhase) {
+                        continue
+                    }
+                    suggestions = append(suggestions, fmt.Sprintf("%s,%s", skipHelmPhase, phase))
+                }
+                return suggestions
+            }(),
+        },
+        "one full word, one partial": {
+            toComplete:      string(skipHelmPhase) + ",ima",
+            wantSuggestions: []string{fmt.Sprintf("%s,%s", skipHelmPhase, skipImagePhase)},
+        },
+        "all phases": {
+            toComplete:      strings.Join(allPhases(), ","),
+            wantSuggestions: []string{},
+        },
+    }
+
+    for name, tc := range testCases {
+        t.Run(name, func(t *testing.T) {
+            assert := assert.New(t)
+
+            suggestions, _ := skipPhasesCompletion(nil, nil, tc.toComplete)
+            assert.ElementsMatch(tc.wantSuggestions, suggestions, "got: %v, want: %v", suggestions, tc.wantSuggestions)
+        })
+    }
+}
+
+func newPhases(phases ...skipPhase) skipPhases {
+    skipPhases := skipPhases{}
+    skipPhases.add(phases...)
+    return skipPhases
 }
@@ -7,11 +7,14 @@ SPDX-License-Identifier: AGPL-3.0-only
 package cmd
 
 import (
+    "errors"
     "fmt"
+    "io"
     "path/filepath"
 
     "github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
     "github.com/edgelesssys/constellation/v2/cli/internal/state"
+    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
    "github.com/edgelesssys/constellation/v2/internal/config"
     "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/spf13/cobra"
@@ -20,49 +23,53 @@ import (
 // runTerraformApply checks if changes to Terraform are required and applies them.
 func (a *applyCmd) runTerraformApply(cmd *cobra.Command, conf *config.Config, stateFile *state.State, upgradeDir string) error {
     a.log.Debugf("Checking if Terraform migrations are required")
-    terraformClient, removeInstaller, err := a.newInfraApplier(cmd.Context())
+    terraformClient, removeClient, err := a.newInfraApplier(cmd.Context())
     if err != nil {
         return fmt.Errorf("creating Terraform client: %w", err)
     }
-    defer removeInstaller()
+    defer removeClient()
 
-    migrationRequired, err := a.planTerraformMigration(cmd, conf, terraformClient)
+    // Check if we are creating a new cluster by checking if the Terraform workspace is empty
+    isNewCluster, err := terraformClient.WorkingDirIsEmpty()
     if err != nil {
-        return fmt.Errorf("planning Terraform migrations: %w", err)
+        return fmt.Errorf("checking if Terraform workspace is empty: %w", err)
     }
 
-    if !migrationRequired {
+    if changesRequired, err := a.planTerraformChanges(cmd, conf, terraformClient); err != nil {
+        return fmt.Errorf("planning Terraform migrations: %w", err)
+    } else if !changesRequired {
         a.log.Debugf("No changes to infrastructure required, skipping Terraform migrations")
         return nil
     }
 
-    a.log.Debugf("Migrating terraform resources for infrastructure changes")
-    postMigrationInfraState, err := a.migrateTerraform(cmd, conf, terraformClient, upgradeDir)
+    a.log.Debugf("Apply new Terraform resources for infrastructure changes")
+    newInfraState, err := a.applyTerraformChanges(cmd, conf, terraformClient, upgradeDir, isNewCluster)
     if err != nil {
-        return fmt.Errorf("performing Terraform migrations: %w", err)
+        return err
     }
 
-    // Merge the pre-upgrade state with the post-migration infrastructure values
+    // Merge the original state with the new infrastructure values
     a.log.Debugf("Updating state file with new infrastructure state")
     if _, err := stateFile.Merge(
-        // temporary state with post-migration infrastructure values
-        state.New().SetInfrastructure(postMigrationInfraState),
+        // temporary state with new infrastructure values
+        state.New().SetInfrastructure(newInfraState),
     ); err != nil {
-        return fmt.Errorf("merging pre-upgrade state with post-migration infrastructure values: %w", err)
+        return fmt.Errorf("merging old state with new infrastructure values: %w", err)
     }
 
-    // Write the post-migration state to disk
+    // Write the new state to disk
     if err := stateFile.WriteToFile(a.fileHandler, constants.StateFilename); err != nil {
         return fmt.Errorf("writing state file: %w", err)
     }
     return nil
 }
 
-// planTerraformMigration checks if the Constellation version the cluster is being upgraded to requires a migration.
-func (a *applyCmd) planTerraformMigration(cmd *cobra.Command, conf *config.Config, terraformClient cloudApplier) (bool, error) {
-    a.log.Debugf("Planning Terraform migrations")
+// planTerraformChanges checks if any changes to the Terraform state are required.
+// If no state exists, this function will return true and the caller should create a new state.
+func (a *applyCmd) planTerraformChanges(cmd *cobra.Command, conf *config.Config, terraformClient cloudApplier) (bool, error) {
+    a.log.Debugf("Planning Terraform changes")
 
-    // Check if there are any Terraform migrations to apply
+    // Check if there are any Terraform changes to apply
 
     // Add manual migrations here if required
     //
@ -77,42 +84,119 @@ func (a *applyCmd) planTerraformMigration(cmd *cobra.Command, conf *config.Confi
|
|||||||
return terraformClient.Plan(cmd.Context(), conf)
|
return terraformClient.Plan(cmd.Context(), conf)
|
||||||
}
|
}
|
||||||
|
|
||||||
// migrateTerraform migrates an existing Terraform state and the post-migration infrastructure state is returned.
|
// applyTerraformChanges applies planned changes to a Terraform state and returns the resulting infrastructure state.
|
||||||
func (a *applyCmd) migrateTerraform(cmd *cobra.Command, conf *config.Config, terraformClient cloudApplier, upgradeDir string) (state.Infrastructure, error) {
|
// If no state existed prior to this function call, a new cluster will be created.
|
||||||
|
func (a *applyCmd) applyTerraformChanges(
|
||||||
|
cmd *cobra.Command, conf *config.Config, terraformClient cloudApplier, upgradeDir string, isNewCluster bool,
|
||||||
|
) (state.Infrastructure, error) {
|
||||||
|
if isNewCluster {
|
||||||
|
if err := printCreateInfo(cmd.OutOrStdout(), conf, a.log); err != nil {
|
||||||
|
return state.Infrastructure{}, err
|
||||||
|
}
|
||||||
|
return a.applyTerraformChangesWithMessage(
|
||||||
|
cmd, conf.GetProvider(), cloudcmd.WithRollbackOnError, terraformClient, upgradeDir,
|
||||||
|
"Do you want to create this cluster?",
|
||||||
|
"The creation of the cluster was aborted.",
|
||||||
|
"cluster creation aborted by user",
|
||||||
|
"Creating",
|
||||||
|
"Cloud infrastructure created successfully.",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd.Println("Changes of Constellation cloud resources are required by applying an updated Terraform template.")
|
||||||
|
return a.applyTerraformChangesWithMessage(
|
||||||
|
cmd, conf.GetProvider(), cloudcmd.WithoutRollbackOnError, terraformClient, upgradeDir,
|
||||||
|
"Do you want to apply these Terraform changes?",
|
||||||
|
"Aborting upgrade.",
|
||||||
|
"cluster upgrade aborted by user",
|
||||||
|
"Applying Terraform changes",
|
||||||
|
fmt.Sprintf("Infrastructure migrations applied successfully and output written to: %s\n"+
|
||||||
|
"A backup of the pre-upgrade state has been written to: %s",
|
||||||
|
a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename),
|
||||||
|
a.flags.pathPrefixer.PrefixPrintablePath(filepath.Join(upgradeDir, constants.TerraformUpgradeBackupDir)),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *applyCmd) applyTerraformChangesWithMessage(
|
||||||
|
cmd *cobra.Command, csp cloudprovider.Provider, rollbackBehavior cloudcmd.RollbackBehavior,
|
||||||
|
terraformClient cloudApplier, upgradeDir string,
|
||||||
|
confirmationQst, abortMsg, abortErrorMsg, progressMsg, successMsg string,
|
||||||
|
) (state.Infrastructure, error) {
|
||||||
// Ask for confirmation first
|
// Ask for confirmation first
|
||||||
cmd.Println("The upgrade requires a migration of Constellation cloud resources by applying an updated Terraform template.")
|
|
||||||
if !a.flags.yes {
|
if !a.flags.yes {
|
||||||
ok, err := askToConfirm(cmd, "Do you want to apply the Terraform migrations?")
|
ok, err := askToConfirm(cmd, confirmationQst)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return state.Infrastructure{}, fmt.Errorf("asking for confirmation: %w", err)
|
return state.Infrastructure{}, fmt.Errorf("asking for confirmation: %w", err)
|
||||||
}
|
}
|
||||||
if !ok {
|
if !ok {
|
||||||
cmd.Println("Aborting upgrade.")
|
cmd.Println(abortMsg)
|
||||||
// User doesn't expect to see any changes in his workspace after aborting an "upgrade apply",
|
// User doesn't expect to see any changes in their workspace after aborting an "apply",
|
||||||
// therefore, roll back to the backed up state.
|
// therefore, restore the workspace to the previous state.
|
||||||
if err := terraformClient.RestoreWorkspace(); err != nil {
|
if err := terraformClient.RestoreWorkspace(); err != nil {
|
||||||
return state.Infrastructure{}, fmt.Errorf(
|
return state.Infrastructure{}, fmt.Errorf(
|
||||||
"restoring Terraform workspace: %w, restore the Terraform workspace manually from %s ",
|
"restoring Terraform workspace: %w, clean up or restore the Terraform workspace manually from %s ",
|
||||||
err,
|
err,
|
||||||
filepath.Join(upgradeDir, constants.TerraformUpgradeBackupDir),
|
filepath.Join(upgradeDir, constants.TerraformUpgradeBackupDir),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
return state.Infrastructure{}, fmt.Errorf("cluster upgrade aborted by user")
|
return state.Infrastructure{}, errors.New(abortErrorMsg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
a.log.Debugf("Applying Terraform migrations")
|
a.log.Debugf("Applying Terraform changes")
|
||||||
|
|
||||||
a.spinner.Start("Migrating Terraform resources", false)
|
a.spinner.Start(progressMsg, false)
|
||||||
infraState, err := terraformClient.Apply(cmd.Context(), conf.GetProvider(), cloudcmd.WithoutRollbackOnError)
|
infraState, err := terraformClient.Apply(cmd.Context(), csp, rollbackBehavior)
|
||||||
a.spinner.Stop()
|
a.spinner.Stop()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return state.Infrastructure{}, fmt.Errorf("applying terraform migrations: %w", err)
|
return state.Infrastructure{}, fmt.Errorf("applying terraform changes: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd.Printf("Infrastructure migrations applied successfully and output written to: %s\n"+
|
cmd.Println(successMsg)
|
||||||
"A backup of the pre-upgrade state has been written to: %s\n",
|
|
||||||
a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename),
|
|
||||||
a.flags.pathPrefixer.PrefixPrintablePath(filepath.Join(upgradeDir, constants.TerraformUpgradeBackupDir)),
|
|
||||||
)
|
|
||||||
return infraState, nil
|
return infraState, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func printCreateInfo(out io.Writer, conf *config.Config, log debugLog) error {
|
||||||
|
controlPlaneGroup, ok := conf.NodeGroups[constants.DefaultControlPlaneGroupName]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("default control-plane node group %q not found in configuration", constants.DefaultControlPlaneGroupName)
|
||||||
|
}
|
||||||
|
controlPlaneType := controlPlaneGroup.InstanceType
|
||||||
|
|
||||||
|
workerGroup, ok := conf.NodeGroups[constants.DefaultWorkerGroupName]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("default worker node group %q not found in configuration", constants.DefaultWorkerGroupName)
|
||||||
|
}
|
||||||
|
workerGroupType := workerGroup.InstanceType
|
||||||
|
|
||||||
|
var qemuInstanceType string
|
||||||
|
if conf.GetProvider() == cloudprovider.QEMU {
|
||||||
|
qemuInstanceType = fmt.Sprintf("%d-vCPUs", conf.Provider.QEMU.VCPUs)
|
||||||
|
controlPlaneType = qemuInstanceType
|
||||||
|
workerGroupType = qemuInstanceType
|
||||||
|
}
|
||||||
|
|
||||||
|
otherGroupNames := make([]string, 0, len(conf.NodeGroups)-2)
|
||||||
|
for groupName := range conf.NodeGroups {
|
||||||
|
if groupName != constants.DefaultControlPlaneGroupName && groupName != constants.DefaultWorkerGroupName {
|
||||||
|
otherGroupNames = append(otherGroupNames, groupName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(otherGroupNames) > 0 {
|
||||||
|
log.Debugf("Creating %d additional node groups: %v", len(otherGroupNames), otherGroupNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(out, "The following Constellation cluster will be created:\n")
|
||||||
|
fmt.Fprintf(out, " %d control-plane node%s of type %s will be created.\n", controlPlaneGroup.InitialCount, isPlural(controlPlaneGroup.InitialCount), controlPlaneType)
|
||||||
|
fmt.Fprintf(out, " %d worker node%s of type %s will be created.\n", workerGroup.InitialCount, isPlural(workerGroup.InitialCount), workerGroupType)
|
||||||
|
for _, groupName := range otherGroupNames {
|
||||||
|
group := conf.NodeGroups[groupName]
|
||||||
|
groupInstanceType := group.InstanceType
|
||||||
|
if conf.GetProvider() == cloudprovider.QEMU {
|
||||||
|
groupInstanceType = qemuInstanceType
|
||||||
|
}
|
||||||
|
fmt.Fprintf(out, " group %s with %d node%s of type %s will be created.\n", groupName, group.InitialCount, isPlural(group.InitialCount), groupInstanceType)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
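
For orientation while reading the hunks above: the reworked flow first asks whether the Terraform workspace is empty (create) or populated (upgrade), then plans, and only then applies with the matching rollback behavior. The following standalone Go sketch condenses that order of operations; the applier interface and fakeApplier stub are illustrative stand-ins for the patch's cloudApplier and are not part of the repository.

package main

import "fmt"

// applier reduces the patch's cloudApplier to the three calls the new
// control flow depends on. Signatures are simplified for illustration.
type applier interface {
	WorkingDirIsEmpty() (bool, error)
	Plan() (bool, error)
	Apply(rollbackOnError bool) error
}

// run mirrors the decision order of the reworked runTerraformApply:
// detect create-vs-upgrade, plan, then apply.
func run(a applier) error {
	isNewCluster, err := a.WorkingDirIsEmpty()
	if err != nil {
		return fmt.Errorf("checking if Terraform workspace is empty: %w", err)
	}

	changesRequired, err := a.Plan()
	if err != nil {
		return fmt.Errorf("planning changes: %w", err)
	}
	if !changesRequired {
		return nil // nothing to do, skip the apply step entirely
	}

	// A fresh cluster rolls back on error; an upgrade keeps the
	// half-applied workspace so it can be restored from the backup.
	return a.Apply(isNewCluster)
}

type fakeApplier struct{ empty bool }

func (f fakeApplier) WorkingDirIsEmpty() (bool, error) { return f.empty, nil }
func (f fakeApplier) Plan() (bool, error)              { return true, nil }
func (f fakeApplier) Apply(rollback bool) error {
	fmt.Printf("applying (rollback on error: %v)\n", rollback)
	return nil
}

func main() {
	_ = run(fakeApplier{empty: true}) // prints: applying (rollback on error: true)
}
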
@ -21,6 +21,7 @@ type cloudApplier interface {
 	Plan(ctx context.Context, conf *config.Config) (bool, error)
 	Apply(ctx context.Context, csp cloudprovider.Provider, rollback cloudcmd.RollbackBehavior) (state.Infrastructure, error)
 	RestoreWorkspace() error
+	WorkingDirIsEmpty() (bool, error)
 }
 
 type cloudIAMCreator interface {
@ -29,14 +29,18 @@ func TestMain(m *testing.M) {
 type stubCloudCreator struct {
 	state       state.Infrastructure
 	planCalled  bool
+	planDiff    bool
 	planErr     error
 	applyCalled bool
 	applyErr    error
+	restoreErr  error
+	workspaceIsEmpty    bool
+	workspaceIsEmptyErr error
 }
 
 func (c *stubCloudCreator) Plan(_ context.Context, _ *config.Config) (bool, error) {
 	c.planCalled = true
-	return false, c.planErr
+	return c.planDiff, c.planErr
 }
 
 func (c *stubCloudCreator) Apply(_ context.Context, _ cloudprovider.Provider, _ cloudcmd.RollbackBehavior) (state.Infrastructure, error) {
@ -45,7 +49,11 @@ func (c *stubCloudCreator) Apply(_ context.Context, _ cloudprovider.Provider, _
 }
 
 func (c *stubCloudCreator) RestoreWorkspace() error {
-	return nil
+	return c.restoreErr
+}
+
+func (c *stubCloudCreator) WorkingDirIsEmpty() (bool, error) {
+	return c.workspaceIsEmpty, c.workspaceIsEmptyErr
 }
 
 type stubCloudTerminator struct {
@ -7,24 +7,12 @@ SPDX-License-Identifier: AGPL-3.0-only
 package cmd
 
 import (
-	"errors"
 	"fmt"
-	"io/fs"
+	"time"
-	"os"
-	"path/filepath"
 
-	"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
-	"github.com/edgelesssys/constellation/v2/cli/internal/state"
-	"github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
 	"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
-	"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
-	"github.com/edgelesssys/constellation/v2/internal/config"
-	"github.com/edgelesssys/constellation/v2/internal/constants"
-	"github.com/edgelesssys/constellation/v2/internal/file"
 	"github.com/edgelesssys/constellation/v2/internal/semver"
-	"github.com/spf13/afero"
 	"github.com/spf13/cobra"
-	"github.com/spf13/pflag"
 )
 
 // NewCreateCmd returns a new cobra.Command for the create command.
@ -34,215 +22,21 @@ func NewCreateCmd() *cobra.Command {
 		Short: "Create instances on a cloud platform for your Constellation cluster",
 		Long:  "Create instances on a cloud platform for your Constellation cluster.",
 		Args:  cobra.ExactArgs(0),
-		RunE:  runCreate,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			cmd.Flags().Bool("conformance", false, "")
+			cmd.Flags().Bool("skip-helm-wait", false, "")
+			cmd.Flags().Bool("merge-kubeconfig", false, "")
+			cmd.Flags().Duration("timeout", 5*time.Minute, "")
+			// Skip all phases but the infrastructure phase.
+			cmd.Flags().StringSlice("skip-phases", allPhases(skipInfrastructurePhase), "")
+			return runApply(cmd, args)
+		},
+		Deprecated: "use 'constellation apply' instead.",
 	}
 	cmd.Flags().BoolP("yes", "y", false, "create the cluster without further confirmation")
 	return cmd
 }
 
-// createFlags contains the parsed flags of the create command.
-type createFlags struct {
-	rootFlags
-	yes bool
-}
-
-// parse parses the flags of the create command.
-func (f *createFlags) parse(flags *pflag.FlagSet) error {
-	if err := f.rootFlags.parse(flags); err != nil {
-		return err
-	}
-
-	yes, err := flags.GetBool("yes")
-	if err != nil {
-		return fmt.Errorf("getting 'yes' flag: %w", err)
-	}
-	f.yes = yes
-	return nil
-}
-
-type createCmd struct {
-	log   debugLog
-	flags createFlags
-}
-
-func runCreate(cmd *cobra.Command, _ []string) error {
-	log, err := newCLILogger(cmd)
-	if err != nil {
-		return fmt.Errorf("creating logger: %w", err)
-	}
-	defer log.Sync()
-	spinner, err := newSpinnerOrStderr(cmd)
-	if err != nil {
-		return fmt.Errorf("creating spinner: %w", err)
-	}
-	defer spinner.Stop()
-
-	fileHandler := file.NewHandler(afero.NewOsFs())
-	c := &createCmd{log: log}
-	if err := c.flags.parse(cmd.Flags()); err != nil {
-		return err
-	}
-	c.log.Debugf("Using flags: %+v", c.flags)
-
-	applier, removeInstaller, err := cloudcmd.NewApplier(
-		cmd.Context(),
-		spinner,
-		constants.TerraformWorkingDir,
-		filepath.Join(constants.UpgradeDir, "create"), // Not used by create
-		c.flags.tfLogLevel,
-		fileHandler,
-	)
-	if err != nil {
-		return err
-	}
-	defer removeInstaller()
-
-	fetcher := attestationconfigapi.NewFetcher()
-	return c.create(cmd, applier, fileHandler, spinner, fetcher)
-}
-
-func (c *createCmd) create(cmd *cobra.Command, applier cloudApplier, fileHandler file.Handler, spinner spinnerInterf, fetcher attestationconfigapi.Fetcher) (retErr error) {
-	if err := c.checkDirClean(fileHandler); err != nil {
-		return err
-	}
-
-	c.log.Debugf("Loading configuration file from %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
-	conf, err := config.New(fileHandler, constants.ConfigFilename, fetcher, c.flags.force)
-	c.log.Debugf("Configuration file loaded: %+v", conf)
-	var configValidationErr *config.ValidationError
-	if errors.As(err, &configValidationErr) {
-		cmd.PrintErrln(configValidationErr.LongMessage())
-	}
-	if err != nil {
-		return err
-	}
-	if !c.flags.force {
-		if err := validateCLIandConstellationVersionAreEqual(constants.BinaryVersion(), conf.Image, conf.MicroserviceVersion); err != nil {
-			return err
-		}
-	}
-
-	c.log.Debugf("Checking configuration for warnings")
-	var printedAWarning bool
-	if !conf.IsReleaseImage() {
-		cmd.PrintErrln("Configured image doesn't look like a released production image. Double check image before deploying to production.")
-		printedAWarning = true
-	}
-
-	if conf.IsNamedLikeDebugImage() && !conf.IsDebugCluster() {
-		cmd.PrintErrln("WARNING: A debug image is used but debugCluster is false.")
-		printedAWarning = true
-	}
-
-	if conf.IsDebugCluster() {
-		cmd.PrintErrln("WARNING: Creating a debug cluster. This cluster is not secure and should only be used for debugging purposes.")
-		cmd.PrintErrln("DO NOT USE THIS CLUSTER IN PRODUCTION.")
-		printedAWarning = true
-	}
-
-	if conf.GetAttestationConfig().GetVariant().Equal(variant.AzureTrustedLaunch{}) {
-		cmd.PrintErrln("Disabling Confidential VMs is insecure. Use only for evaluation purposes.")
-		printedAWarning = true
-	}
-
-	// Print an extra new line later to separate warnings from the prompt message of the create command
-	if printedAWarning {
-		cmd.PrintErrln("")
-	}
-
-	controlPlaneGroup, ok := conf.NodeGroups[constants.DefaultControlPlaneGroupName]
-	if !ok {
-		return fmt.Errorf("default control-plane node group %q not found in configuration", constants.DefaultControlPlaneGroupName)
-	}
-	workerGroup, ok := conf.NodeGroups[constants.DefaultWorkerGroupName]
-	if !ok {
-		return fmt.Errorf("default worker node group %q not found in configuration", constants.DefaultWorkerGroupName)
-	}
-	otherGroupNames := make([]string, 0, len(conf.NodeGroups)-2)
-	for groupName := range conf.NodeGroups {
-		if groupName != constants.DefaultControlPlaneGroupName && groupName != constants.DefaultWorkerGroupName {
-			otherGroupNames = append(otherGroupNames, groupName)
-		}
-	}
-	if len(otherGroupNames) > 0 {
-		c.log.Debugf("Creating %d additional node groups: %v", len(otherGroupNames), otherGroupNames)
-	}
-
-	if !c.flags.yes {
-		// Ask user to confirm action.
-		cmd.Printf("The following Constellation cluster will be created:\n")
-		cmd.Printf("  %d control-plane node%s of type %s will be created.\n", controlPlaneGroup.InitialCount, isPlural(controlPlaneGroup.InitialCount), controlPlaneGroup.InstanceType)
-		cmd.Printf("  %d worker node%s of type %s will be created.\n", workerGroup.InitialCount, isPlural(workerGroup.InitialCount), workerGroup.InstanceType)
-		for _, groupName := range otherGroupNames {
-			group := conf.NodeGroups[groupName]
-			cmd.Printf("  group %s with %d node%s of type %s will be created.\n", groupName, group.InitialCount, isPlural(group.InitialCount), group.InstanceType)
-		}
-		ok, err := askToConfirm(cmd, "Do you want to create this cluster?")
-		if err != nil {
-			return err
-		}
-		if !ok {
-			cmd.Println("The creation of the cluster was aborted.")
-			return nil
-		}
-	}
-
-	spinner.Start("Creating", false)
-	if _, err := applier.Plan(cmd.Context(), conf); err != nil {
-		return fmt.Errorf("planning infrastructure creation: %w", err)
-	}
-	infraState, err := applier.Apply(cmd.Context(), conf.GetProvider(), cloudcmd.WithRollbackOnError)
-	spinner.Stop()
-	if err != nil {
-		return err
-	}
-	c.log.Debugf("Successfully created the cloud resources for the cluster")
-
-	stateFile, err := state.CreateOrRead(fileHandler, constants.StateFilename)
-	if err != nil {
-		return fmt.Errorf("reading state file: %w", err)
-	}
-	if err := stateFile.Validate(state.PreCreate, conf.GetProvider()); err != nil {
-		return fmt.Errorf("validating state file: %w", err)
-	}
-	stateFile = stateFile.SetInfrastructure(infraState)
-	if err := stateFile.WriteToFile(fileHandler, constants.StateFilename); err != nil {
-		return fmt.Errorf("writing state file: %w", err)
-	}
-
-	cmd.Println("Your Constellation cluster was created successfully.")
-	return nil
-}
-
-// checkDirClean checks if files of a previous Constellation are left in the current working dir.
-func (c *createCmd) checkDirClean(fileHandler file.Handler) error {
-	c.log.Debugf("Checking admin configuration file")
-	if _, err := fileHandler.Stat(constants.AdminConfFilename); !errors.Is(err, fs.ErrNotExist) {
-		return fmt.Errorf(
-			"file '%s' already exists in working directory, run 'constellation terminate' before creating a new one",
-			c.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename),
-		)
-	}
-	c.log.Debugf("Checking master secrets file")
-	if _, err := fileHandler.Stat(constants.MasterSecretFilename); !errors.Is(err, fs.ErrNotExist) {
-		return fmt.Errorf(
-			"file '%s' already exists in working directory. Constellation won't overwrite previous master secrets. Move it somewhere or delete it before creating a new cluster",
-			c.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename),
-		)
-	}
-	c.log.Debugf("Checking terraform working directory")
-	if clean, err := fileHandler.IsEmpty(constants.TerraformWorkingDir); err != nil && !errors.Is(err, os.ErrNotExist) {
-		return fmt.Errorf("checking if terraform working directory is empty: %w", err)
-	} else if err == nil && !clean {
-		return fmt.Errorf(
-			"directory '%s' already exists and is not empty, run 'constellation terminate' before creating a new one",
-			c.flags.pathPrefixer.PrefixPrintablePath(constants.TerraformWorkingDir),
-		)
-	}
-
-	return nil
-}
-
 func isPlural(count int) string {
 	if count == 1 {
 		return ""
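
The deprecation pattern in NewCreateCmd above, a legacy RunE that registers the flags the shared apply handler expects, pins them to the legacy behavior, and then delegates, is generic cobra usage. Here is a minimal, self-contained sketch with placeholder command names (tool, old, new); none of these identifiers exist in the repository.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// newRootCmd wires a deprecated "old" command that forwards to the
// shared handler behind "new", following the same shape as NewCreateCmd.
func newRootCmd() *cobra.Command {
	root := &cobra.Command{Use: "tool"}

	runNew := func(cmd *cobra.Command, _ []string) error {
		mode, _ := cmd.Flags().GetString("mode")
		fmt.Printf("running shared handler in mode %q\n", mode)
		return nil
	}

	newCmd := &cobra.Command{Use: "new", RunE: runNew}
	newCmd.Flags().String("mode", "full", "execution mode")

	oldCmd := &cobra.Command{
		Use: "old",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Register the flag the shared handler reads, pinned to the
			// legacy behavior, then delegate.
			cmd.Flags().String("mode", "legacy", "")
			return runNew(cmd, args)
		},
		Deprecated: "use 'tool new' instead.",
	}

	root.AddCommand(newCmd, oldCmd)
	return root
}

func main() {
	cmd := newRootCmd()
	cmd.SetArgs([]string{"old"}) // prints a deprecation warning, then delegates
	_ = cmd.Execute()
}
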
@ -8,6 +8,7 @@ package cmd
 
 import (
 	"bytes"
+	"context"
 	"testing"
 
 	"github.com/edgelesssys/constellation/v2/cli/internal/state"
@ -22,31 +23,8 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-// preCreateStateFile returns a state file satisfying the pre-create state file
-// constraints.
-func preCreateStateFile() *state.State {
-	s := defaultAzureStateFile()
-	s.ClusterValues = state.ClusterValues{}
-	s.Infrastructure = state.Infrastructure{}
-	return s
-}
-
 func TestCreate(t *testing.T) {
-	fsWithDefaultConfigAndState := func(require *require.Assertions, provider cloudprovider.Provider) afero.Fs {
-		fs := afero.NewMemMapFs()
-		file := file.NewHandler(fs)
-		require.NoError(file.WriteYAML(constants.ConfigFilename, defaultConfigWithExpectedMeasurements(t, config.Default(), provider)))
-		stateFile := preCreateStateFile()
-		switch provider {
-		case cloudprovider.GCP:
-			stateFile.SetInfrastructure(state.Infrastructure{GCP: &state.GCP{}})
-		case cloudprovider.Azure:
-			stateFile.SetInfrastructure(state.Infrastructure{Azure: &state.Azure{}})
-		}
-		require.NoError(stateFile.WriteToFile(file, constants.StateFilename))
-		return fs
-	}
-	fsWithoutState := func(require *require.Assertions, provider cloudprovider.Provider) afero.Fs {
+	fsWithDefaultConfig := func(require *require.Assertions, provider cloudprovider.Provider) afero.Fs {
 		fs := afero.NewMemMapFs()
 		file := file.NewHandler(fs)
 		require.NoError(file.WriteYAML(constants.ConfigFilename, defaultConfigWithExpectedMeasurements(t, config.Default(), provider)))
@ -62,31 +40,49 @@ func TestCreate(t *testing.T) {
 		controllerCountFlag *int
 		workerCountFlag     *int
 		stdin               string
+		getCreatorErr       error
 		wantErr             bool
 		wantAbort           bool
 	}{
 		"create": {
-			setupFs: fsWithDefaultConfigAndState,
-			creator: &stubCloudCreator{state: infraState},
+			setupFs: fsWithDefaultConfig,
+			creator: &stubCloudCreator{
+				state:            infraState,
+				planDiff:         true,
+				workspaceIsEmpty: true,
+			},
 			provider: cloudprovider.GCP,
 			yesFlag:  true,
 		},
 		"interactive": {
-			setupFs: fsWithDefaultConfigAndState,
-			creator: &stubCloudCreator{state: infraState},
+			setupFs: fsWithDefaultConfig,
+			creator: &stubCloudCreator{
+				state:            infraState,
+				planDiff:         true,
+				workspaceIsEmpty: true,
+			},
 			provider: cloudprovider.Azure,
 			stdin:    "yes\n",
 		},
 		"interactive abort": {
-			setupFs: fsWithDefaultConfigAndState,
-			creator: &stubCloudCreator{state: infraState},
+			setupFs: fsWithDefaultConfig,
+			creator: &stubCloudCreator{
+				state:            infraState,
+				planDiff:         true,
+				workspaceIsEmpty: true,
+			},
 			provider:  cloudprovider.GCP,
 			stdin:     "no\n",
 			wantAbort: true,
+			wantErr:   true,
 		},
 		"interactive error": {
-			setupFs: fsWithDefaultConfigAndState,
-			creator: &stubCloudCreator{state: infraState},
+			setupFs: fsWithDefaultConfig,
+			creator: &stubCloudCreator{
+				state:            infraState,
+				planDiff:         true,
+				workspaceIsEmpty: true,
+			},
 			provider: cloudprovider.GCP,
 			stdin:    "foo\nfoo\nfoo\n",
 			wantErr:  true,
@ -99,7 +95,11 @@ func TestCreate(t *testing.T) {
 				require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, defaultConfigWithExpectedMeasurements(t, config.Default(), csp)))
 				return fs
 			},
-			creator: &stubCloudCreator{state: infraState},
+			creator: &stubCloudCreator{
+				state:            infraState,
+				planDiff:         true,
+				workspaceIsEmpty: true,
+			},
 			provider: cloudprovider.GCP,
 			yesFlag:  true,
 			wantErr:  true,
@ -112,27 +112,45 @@ func TestCreate(t *testing.T) {
 				require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, defaultConfigWithExpectedMeasurements(t, config.Default(), csp)))
 				return fs
 			},
-			creator: &stubCloudCreator{state: infraState},
+			creator: &stubCloudCreator{
+				state:            infraState,
+				planDiff:         true,
+				workspaceIsEmpty: true,
+			},
 			provider: cloudprovider.GCP,
 			yesFlag:  true,
 			wantErr:  true,
 		},
 		"config does not exist": {
 			setupFs: func(a *require.Assertions, p cloudprovider.Provider) afero.Fs { return afero.NewMemMapFs() },
-			creator: &stubCloudCreator{state: infraState},
+			creator: &stubCloudCreator{
+				state:            infraState,
+				planDiff:         true,
+				workspaceIsEmpty: true,
+			},
 			provider: cloudprovider.GCP,
 			yesFlag:  true,
 			wantErr:  true,
 		},
-		"state file does not exist": {
-			setupFs: fsWithoutState,
-			creator: &stubCloudCreator{state: infraState},
+		"state file exist (but is empty)": {
+			setupFs: func(r *require.Assertions, csp cloudprovider.Provider) afero.Fs {
+				fs := afero.NewMemMapFs()
+				file := file.NewHandler(fs)
+				r.NoError(file.WriteYAML(constants.ConfigFilename, defaultConfigWithExpectedMeasurements(t, config.Default(), csp)))
+				r.NoError(file.WriteYAML(constants.StateFilename, state.New()))
+				return fs
+			},
+			creator: &stubCloudCreator{
+				state:            infraState,
+				planDiff:         true,
+				workspaceIsEmpty: true,
+			},
 			provider: cloudprovider.GCP,
 			yesFlag:  true,
 		},
 		"create error": {
-			setupFs: fsWithDefaultConfigAndState,
-			creator: &stubCloudCreator{applyErr: assert.AnError},
+			setupFs: fsWithDefaultConfig,
+			creator: &stubCloudCreator{applyErr: assert.AnError, planDiff: true, workspaceIsEmpty: true},
 			provider: cloudprovider.GCP,
 			yesFlag:  true,
 			wantErr:  true,
@ -144,7 +162,46 @@ func TestCreate(t *testing.T) {
 				require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, defaultConfigWithExpectedMeasurements(t, config.Default(), csp)))
 				return afero.NewReadOnlyFs(fs)
 			},
-			creator: &stubCloudCreator{state: infraState},
+			creator: &stubCloudCreator{
+				state:            infraState,
+				planDiff:         true,
+				workspaceIsEmpty: true,
+			},
+			provider: cloudprovider.GCP,
+			yesFlag:  true,
+			wantErr:  true,
+		},
+		"check dir clean error": {
+			setupFs: fsWithDefaultConfig,
+			creator: &stubCloudCreator{
+				state:               infraState,
+				planDiff:            true,
+				workspaceIsEmptyErr: assert.AnError,
+			},
+			provider: cloudprovider.GCP,
+			yesFlag:  true,
+			wantErr:  true,
+		},
+		"get creator error": {
+			setupFs: fsWithDefaultConfig,
+			creator: &stubCloudCreator{
+				state:               infraState,
+				planDiff:            true,
+				workspaceIsEmptyErr: assert.AnError,
+			},
+			provider:      cloudprovider.GCP,
+			yesFlag:       true,
+			getCreatorErr: assert.AnError,
+			wantErr:       true,
+		},
+		"plan error": {
+			setupFs: fsWithDefaultConfig,
+			creator: &stubCloudCreator{
+				state:            infraState,
+				planDiff:         true,
+				planErr:          assert.AnError,
+				workspaceIsEmpty: true,
+			},
 			provider: cloudprovider.GCP,
 			yesFlag:  true,
 			wantErr:  true,
@ -162,17 +219,33 @@ func TestCreate(t *testing.T) {
 			cmd.SetIn(bytes.NewBufferString(tc.stdin))
 
 			fileHandler := file.NewHandler(tc.setupFs(require, tc.provider))
-			c := &createCmd{log: logger.NewTest(t), flags: createFlags{yes: tc.yesFlag}}
-			err := c.create(cmd, tc.creator, fileHandler, &nopSpinner{}, stubAttestationFetcher{})
+			a := &applyCmd{
+				fileHandler: fileHandler,
+				flags: applyFlags{
+					yes:        tc.yesFlag,
+					skipPhases: newPhases(skipInitPhase, skipAttestationConfigPhase, skipCertSANsPhase, skipHelmPhase, skipImagePhase, skipK8sPhase),
+				},
+
+				log:     logger.NewTest(t),
+				spinner: &nopSpinner{},
+
+				newInfraApplier: func(_ context.Context) (cloudApplier, func(), error) {
+					return tc.creator, func() {}, tc.getCreatorErr
+				},
+			}
+
+			err := a.apply(cmd, stubAttestationFetcher{}, &stubLicenseClient{}, "create")
 
 			if tc.wantErr {
 				assert.Error(err)
+				if tc.wantAbort {
+					assert.True(tc.creator.planCalled)
+					assert.False(tc.creator.applyCalled)
+				}
 			} else {
 				assert.NoError(err)
-				if tc.wantAbort {
-					assert.False(tc.creator.planCalled)
-					assert.False(tc.creator.applyCalled)
-				} else {
 				assert.True(tc.creator.planCalled)
 				assert.True(tc.creator.applyCalled)
@ -185,7 +258,7 @@ func TestCreate(t *testing.T) {
 				require.NoError(fileHandler.ReadYAML(constants.StateFilename, &gotState))
 				assert.Equal("v1", gotState.Version)
 				assert.Equal(expectedState, gotState.Infrastructure)
-				}
 			}
 		})
 	}
@ -209,10 +282,6 @@ func TestCheckDirClean(t *testing.T) {
 			existingFiles: []string{constants.AdminConfFilename, constants.MasterSecretFilename},
 			wantErr:       true,
 		},
-		"terraform dir exists": {
-			existingFiles: []string{constants.TerraformWorkingDir},
-			wantErr:       true,
-		},
 	}
 
 	for name, tc := range testCases {
@ -224,8 +293,8 @@ func TestCheckDirClean(t *testing.T) {
 			for _, f := range tc.existingFiles {
 				require.NoError(fh.Write(f, []byte{1, 2, 3}, file.OptNone))
 			}
-			c := &createCmd{log: logger.NewTest(t)}
-			err := c.checkDirClean(fh)
+			a := &applyCmd{log: logger.NewTest(t), fileHandler: fh}
+			err := a.checkInitFilesClean()
 
 			if tc.wantErr {
 				assert.Error(err)
@ -61,8 +61,8 @@ func TestInitArgumentValidation(t *testing.T) {
 
 // preInitStateFile returns a state file satisfying the pre-init state file
 // constraints.
-func preInitStateFile() *state.State {
-	s := defaultAzureStateFile()
+func preInitStateFile(csp cloudprovider.Provider) *state.State {
+	s := defaultStateFile(csp)
 	s.ClusterValues = state.ClusterValues{}
 	return s
 }
@ -109,24 +109,24 @@ func TestInitialize(t *testing.T) {
 	}{
 		"initialize some gcp instances": {
 			provider:      cloudprovider.GCP,
-			stateFile:     preInitStateFile(),
+			stateFile:     preInitStateFile(cloudprovider.GCP),
 			configMutator: func(c *config.Config) { c.Provider.GCP.ServiceAccountKeyPath = serviceAccPath },
 			serviceAccKey: gcpServiceAccKey,
 			initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
 		},
 		"initialize some azure instances": {
 			provider:      cloudprovider.Azure,
-			stateFile:     preInitStateFile(),
+			stateFile:     preInitStateFile(cloudprovider.Azure),
 			initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
 		},
 		"initialize some qemu instances": {
 			provider:      cloudprovider.QEMU,
-			stateFile:     preInitStateFile(),
+			stateFile:     preInitStateFile(cloudprovider.QEMU),
 			initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
 		},
 		"non retriable error": {
 			provider:                cloudprovider.QEMU,
-			stateFile:               preInitStateFile(),
+			stateFile:               preInitStateFile(cloudprovider.QEMU),
 			initServerAPI:           &stubInitServer{initErr: &nonRetriableError{err: assert.AnError}},
 			retriable:               false,
 			masterSecretShouldExist: true,
@ -134,7 +134,7 @@ func TestInitialize(t *testing.T) {
 		},
 		"non retriable error with failed log collection": {
 			provider:  cloudprovider.QEMU,
-			stateFile: preInitStateFile(),
+			stateFile: preInitStateFile(cloudprovider.QEMU),
 			initServerAPI: &stubInitServer{
 				res: []*initproto.InitResponse{
 					{
@ -185,7 +185,7 @@ func TestInitialize(t *testing.T) {
 		"init call fails": {
 			provider:      cloudprovider.GCP,
 			configMutator: func(c *config.Config) { c.Provider.GCP.ServiceAccountKeyPath = serviceAccPath },
-			stateFile:     preInitStateFile(),
+			stateFile:     preInitStateFile(cloudprovider.GCP),
 			serviceAccKey: gcpServiceAccKey,
 			initServerAPI: &stubInitServer{initErr: assert.AnError},
 			retriable:     false,
@ -194,7 +194,7 @@ func TestInitialize(t *testing.T) {
 		},
 		"k8s version without v works": {
 			provider:      cloudprovider.Azure,
-			stateFile:     preInitStateFile(),
+			stateFile:     preInitStateFile(cloudprovider.Azure),
 			initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
 			configMutator: func(c *config.Config) {
 				res, err := versions.NewValidK8sVersion(strings.TrimPrefix(string(versions.Default), "v"), true)
@ -204,7 +204,7 @@ func TestInitialize(t *testing.T) {
 		},
 		"outdated k8s patch version doesn't work": {
 			provider:      cloudprovider.Azure,
-			stateFile:     preInitStateFile(),
+			stateFile:     preInitStateFile(cloudprovider.Azure),
 			initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
 			configMutator: func(c *config.Config) {
 				v, err := semver.New(versions.SupportedK8sVersions()[0])
@ -263,11 +263,13 @@ func TestInitialize(t *testing.T) {
 
 			i := &applyCmd{
 				fileHandler: fileHandler,
-				flags:       applyFlags{rootFlags: rootFlags{force: true}},
+				flags: applyFlags{
+					rootFlags:  rootFlags{force: true},
+					skipPhases: newPhases(skipInfrastructurePhase),
+				},
 				log:     logger.NewTest(t),
 				spinner: &nopSpinner{},
 				merger:  &stubMerger{},
-				quotaChecker: &stubLicenseClient{},
 				newHelmClient: func(string, debugLog) (helmApplier, error) {
 					return &stubApplier{}, nil
 				},
@ -278,15 +280,13 @@ func TestInitialize(t *testing.T) {
 						getClusterAttestationConfigErr: k8serrors.NewNotFound(schema.GroupResource{}, ""),
 					}, nil
 				},
-				newInfraApplier: func(ctx context.Context) (cloudApplier, func(), error) {
-					return stubTerraformUpgrader{}, func() {}, nil
-				},
 			}
 
-			err := i.apply(cmd, stubAttestationFetcher{}, "test")
+			err := i.apply(cmd, stubAttestationFetcher{}, &stubLicenseClient{}, "test")
 
 			if tc.wantErr {
 				assert.Error(err)
+				fmt.Println(err)
 				if !tc.retriable {
 					assert.Contains(errOut.String(), "This error is not recoverable")
 				} else {
@ -733,6 +733,17 @@ func defaultConfigWithExpectedMeasurements(t *testing.T, conf *config.Config, cs
 
 	var zone, instanceType, diskType string
 	switch csp {
+	case cloudprovider.AWS:
+		conf.Provider.AWS.Region = "test-region-2"
+		conf.Provider.AWS.Zone = "test-zone-2c"
+		conf.Provider.AWS.IAMProfileControlPlane = "test-iam-profile"
+		conf.Provider.AWS.IAMProfileWorkerNodes = "test-iam-profile"
+		conf.Attestation.AWSSEVSNP.Measurements[4] = measurements.WithAllBytes(0x44, measurements.Enforce, measurements.PCRMeasurementLength)
+		conf.Attestation.AWSSEVSNP.Measurements[9] = measurements.WithAllBytes(0x11, measurements.Enforce, measurements.PCRMeasurementLength)
+		conf.Attestation.AWSSEVSNP.Measurements[12] = measurements.WithAllBytes(0xcc, measurements.Enforce, measurements.PCRMeasurementLength)
+		zone = "test-zone-2c"
+		instanceType = "c6a.xlarge"
+		diskType = "gp3"
 	case cloudprovider.Azure:
 		conf.Provider.Azure.SubscriptionID = "01234567-0123-0123-0123-0123456789ab"
 		conf.Provider.Azure.TenantID = "01234567-0123-0123-0123-0123456789ab"
@ -158,7 +158,7 @@ func TestRecover(t *testing.T) {
 	))
 	require.NoError(fileHandler.WriteYAML(
 		constants.StateFilename,
-		defaultGCPStateFile(),
+		defaultStateFile(cloudprovider.GCP),
 		file.OptNone,
 	))
 
@ -36,7 +36,7 @@ func TestUpgradeApply(t *testing.T) {
 	fsWithStateFileAndTfState := func() file.Handler {
 		fh := file.NewHandler(afero.NewMemMapFs())
 		require.NoError(t, fh.MkdirAll(constants.TerraformWorkingDir))
-		require.NoError(t, fh.WriteYAML(constants.StateFilename, defaultAzureStateFile()))
+		require.NoError(t, fh.WriteYAML(constants.StateFilename, defaultStateFile(cloudprovider.Azure)))
 		return fh
 	}
 
@ -61,7 +61,7 @@ func TestUpgradeApply(t *testing.T) {
 				gotState, err := state.ReadFromFile(fh, constants.StateFilename)
 				require.NoError(err)
 				assert.Equal("v1", gotState.Version)
-				assert.Equal(defaultAzureStateFile(), gotState)
+				assert.Equal(defaultStateFile(cloudprovider.Azure), gotState)
 			},
 		},
 		"id file and state file do not exist": {
@ -180,11 +180,7 @@ func TestUpgradeApply(t *testing.T) {
 			helmUpgrader:      &mockApplier{}, // mocks ensure that no methods are called
 			terraformUpgrader: &mockTerraformUpgrader{},
 			flags: applyFlags{
-				skipPhases: skipPhases{
-					skipInfrastructurePhase: struct{}{}, skipHelmPhase: struct{}{},
-					skipK8sPhase: struct{}{}, skipImagePhase: struct{}{},
-					skipInitPhase: struct{}{},
-				},
+				skipPhases: newPhases(skipInfrastructurePhase, skipAttestationConfigPhase, skipCertSANsPhase, skipHelmPhase, skipK8sPhase, skipImagePhase),
 				yes: true,
 			},
 			fh: fsWithStateFileAndTfState,
@ -196,15 +192,12 @@ func TestUpgradeApply(t *testing.T) {
 			helmUpgrader:      &mockApplier{}, // mocks ensure that no methods are called
 			terraformUpgrader: &mockTerraformUpgrader{},
 			flags: applyFlags{
-				skipPhases: skipPhases{
-					skipInfrastructurePhase: struct{}{}, skipHelmPhase: struct{}{},
-					skipK8sPhase: struct{}{}, skipInitPhase: struct{}{},
-				},
+				skipPhases: newPhases(skipInfrastructurePhase, skipAttestationConfigPhase, skipCertSANsPhase, skipHelmPhase, skipK8sPhase),
 				yes: true,
 			},
 			fh: fsWithStateFileAndTfState,
 		},
-		"no tf state, skip infrastructure upgrade": {
+		"no tf state, infra phase skipped": {
 			kubeUpgrader: &stubKubernetesUpgrader{
 				currentConfig: config.DefaultForAzureSEVSNP(),
 			},
@ -212,13 +205,11 @@ func TestUpgradeApply(t *testing.T) {
 			terraformUpgrader: &mockTerraformUpgrader{},
 			flags: applyFlags{
 				yes: true,
-				skipPhases: skipPhases{
-					skipInitPhase: struct{}{},
-				},
+				skipPhases: newPhases(skipInfrastructurePhase),
 			},
 			fh: func() file.Handler {
 				fh := file.NewHandler(afero.NewMemMapFs())
-				require.NoError(t, fh.WriteYAML(constants.StateFilename, defaultAzureStateFile()))
+				require.NoError(t, fh.WriteYAML(constants.StateFilename, defaultStateFile(cloudprovider.Azure)))
 				return fh
 			},
 		},
@ -254,7 +245,6 @@ func TestUpgradeApply(t *testing.T) {
 				log:     logger.NewTest(t),
 				spinner: &nopSpinner{},
 				merger:  &stubMerger{},
-				quotaChecker: &stubLicenseClient{},
 				newHelmClient: func(string, debugLog) (helmApplier, error) {
 					return tc.helmUpgrader, nil
 				},
@ -265,7 +255,7 @@ func TestUpgradeApply(t *testing.T) {
 					return tc.terraformUpgrader, func() {}, nil
 				},
 			}
-			err := upgrader.apply(cmd, stubAttestationFetcher{}, "test")
+			err := upgrader.apply(cmd, stubAttestationFetcher{}, &stubLicenseClient{}, "test")
 			if tc.wantErr {
 				assert.Error(err)
 				return
@ -338,6 +328,10 @@ func (u stubTerraformUpgrader) RestoreWorkspace() error {
 	return u.rollbackWorkspaceErr
 }
 
+func (u stubTerraformUpgrader) WorkingDirIsEmpty() (bool, error) {
+	return false, nil
+}
+
 type mockTerraformUpgrader struct {
 	mock.Mock
 }
@ -357,6 +351,11 @@ func (m *mockTerraformUpgrader) RestoreWorkspace() error {
 	return args.Error(0)
 }
 
+func (m *mockTerraformUpgrader) WorkingDirIsEmpty() (bool, error) {
+	args := m.Called()
+	return args.Bool(0), args.Error(1)
+}
+
 type mockApplier struct {
 	mock.Mock
 }
@ -48,7 +48,7 @@ func TestVerify(t *testing.T) {
 		formatter          *stubAttDocFormatter
 		nodeEndpointFlag   string
 		clusterIDFlag      string
-		stateFile          func() *state.State
+		stateFile          *state.State
 		wantEndpoint       string
 		skipConfigCreation bool
 		wantErr            bool
@ -58,7 +58,7 @@ func TestVerify(t *testing.T) {
 			nodeEndpointFlag: "192.0.2.1:1234",
 			clusterIDFlag:    zeroBase64,
 			protoClient:      &stubVerifyClient{},
-			stateFile:        defaultGCPStateFile,
+			stateFile:        defaultStateFile(cloudprovider.GCP),
 			wantEndpoint:     "192.0.2.1:1234",
 			formatter:        &stubAttDocFormatter{},
 		},
@ -67,7 +67,7 @@ func TestVerify(t *testing.T) {
 			nodeEndpointFlag: "192.0.2.1:1234",
 			clusterIDFlag:    zeroBase64,
 			protoClient:      &stubVerifyClient{},
-			stateFile:        defaultAzureStateFile,
+			stateFile:        defaultStateFile(cloudprovider.Azure),
 			wantEndpoint:     "192.0.2.1:1234",
 			formatter:        &stubAttDocFormatter{},
 		},
@ -76,7 +76,7 @@ func TestVerify(t *testing.T) {
 			nodeEndpointFlag: "192.0.2.1",
 			clusterIDFlag:    zeroBase64,
 			protoClient:      &stubVerifyClient{},
-			stateFile:        defaultGCPStateFile,
+			stateFile:        defaultStateFile(cloudprovider.GCP),
 			wantEndpoint:     "192.0.2.1:" + strconv.Itoa(constants.VerifyServiceNodePortGRPC),
 			formatter:        &stubAttDocFormatter{},
 		},
@ -85,10 +85,10 @@ func TestVerify(t *testing.T) {
 			clusterIDFlag: zeroBase64,
 			protoClient:   &stubVerifyClient{},
 			stateFile: func() *state.State {
-				s := defaultGCPStateFile()
+				s := defaultStateFile(cloudprovider.GCP)
 				s.Infrastructure.ClusterEndpoint = ""
 				return s
-			},
+			}(),
 			formatter: &stubAttDocFormatter{},
 			wantErr:   true,
 		},
@ -97,10 +97,10 @@ func TestVerify(t *testing.T) {
 			clusterIDFlag: zeroBase64,
 			protoClient:   &stubVerifyClient{},
 			stateFile: func() *state.State {
-				s := defaultGCPStateFile()
+				s := defaultStateFile(cloudprovider.GCP)
 				s.Infrastructure.ClusterEndpoint = "192.0.2.1"
 				return s
-			},
+			}(),
 			wantEndpoint: "192.0.2.1:" + strconv.Itoa(constants.VerifyServiceNodePortGRPC),
 			formatter:    &stubAttDocFormatter{},
 		},
@ -110,10 +110,10 @@ func TestVerify(t *testing.T) {
 			clusterIDFlag: zeroBase64,
 			protoClient:   &stubVerifyClient{},
 			stateFile: func() *state.State {
-				s := defaultGCPStateFile()
+				s := defaultStateFile(cloudprovider.GCP)
 				s.Infrastructure.ClusterEndpoint = "192.0.2.1"
 				return s
-			},
+			}(),
 			wantEndpoint: "192.0.2.2:1234",
 			formatter:    &stubAttDocFormatter{},
 		},
@ -122,7 +122,7 @@ func TestVerify(t *testing.T) {
 			nodeEndpointFlag: ":::::",
 			clusterIDFlag:    zeroBase64,
 			protoClient:      &stubVerifyClient{},
-			stateFile:        defaultGCPStateFile,
+			stateFile:        defaultStateFile(cloudprovider.GCP),
 			formatter:        &stubAttDocFormatter{},
 			wantErr:          true,
 		},
@ -130,11 +130,11 @@ func TestVerify(t *testing.T) {
 			provider:         cloudprovider.GCP,
 			nodeEndpointFlag: "192.0.2.1:1234",
 			stateFile: func() *state.State {
-				s := defaultGCPStateFile()
+				s := defaultStateFile(cloudprovider.GCP)
 				s.ClusterValues.OwnerID = ""
 				s.ClusterValues.ClusterID = ""
 				return s
-			},
+			}(),
 			formatter:   &stubAttDocFormatter{},
 			protoClient: &stubVerifyClient{},
 			wantErr:     true,
@ -144,10 +144,10 @@ func TestVerify(t *testing.T) {
 			nodeEndpointFlag: "192.0.2.1:1234",
 			protoClient:      &stubVerifyClient{},
 			stateFile: func() *state.State {
-				s := defaultGCPStateFile()
+				s := defaultStateFile(cloudprovider.GCP)
 				s.ClusterValues.OwnerID = zeroBase64
 				return s
-			},
+			}(),
 			wantEndpoint: "192.0.2.1:1234",
 			formatter:    &stubAttDocFormatter{},
 		},
@ -155,7 +155,7 @@ func TestVerify(t *testing.T) {
 			provider:         cloudprovider.GCP,
 			clusterIDFlag:    zeroBase64,
 			nodeEndpointFlag: "192.0.2.1:1234",
-			stateFile:        defaultGCPStateFile,
+			stateFile:        defaultStateFile(cloudprovider.GCP),
 			formatter:        &stubAttDocFormatter{},
 			skipConfigCreation: true,
 			wantErr:            true,
@ -165,7 +165,7 @@ func TestVerify(t *testing.T) {
 			nodeEndpointFlag: "192.0.2.1:1234",
 			clusterIDFlag:    zeroBase64,
 			protoClient:      &stubVerifyClient{verifyErr: rpcStatus.Error(codes.Internal, "failed")},
-			stateFile:        defaultAzureStateFile,
+			stateFile:        defaultStateFile(cloudprovider.Azure),
 			formatter:        &stubAttDocFormatter{},
 			wantErr:          true,
 		},
@ -174,7 +174,7 @@ func TestVerify(t *testing.T) {
 			nodeEndpointFlag: "192.0.2.1:1234",
 			clusterIDFlag:    zeroBase64,
 			protoClient:      &stubVerifyClient{verifyErr: someErr},
|
||||||
stateFile: defaultAzureStateFile,
|
stateFile: defaultStateFile(cloudprovider.Azure),
|
||||||
formatter: &stubAttDocFormatter{},
|
formatter: &stubAttDocFormatter{},
|
||||||
wantErr: true,
|
wantErr: true,
|
||||||
},
|
},
|
||||||
@ -183,7 +183,7 @@ func TestVerify(t *testing.T) {
|
|||||||
nodeEndpointFlag: "192.0.2.1:1234",
|
nodeEndpointFlag: "192.0.2.1:1234",
|
||||||
clusterIDFlag: zeroBase64,
|
clusterIDFlag: zeroBase64,
|
||||||
protoClient: &stubVerifyClient{},
|
protoClient: &stubVerifyClient{},
|
||||||
stateFile: defaultAzureStateFile,
|
stateFile: defaultStateFile(cloudprovider.Azure),
|
||||||
wantEndpoint: "192.0.2.1:1234",
|
wantEndpoint: "192.0.2.1:1234",
|
||||||
formatter: &stubAttDocFormatter{formatErr: someErr},
|
formatter: &stubAttDocFormatter{formatErr: someErr},
|
||||||
wantErr: true,
|
wantErr: true,
|
||||||
@ -204,7 +204,7 @@ func TestVerify(t *testing.T) {
|
|||||||
cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), tc.provider)
|
cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), tc.provider)
|
||||||
require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, cfg))
|
require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, cfg))
|
||||||
}
|
}
|
||||||
require.NoError(tc.stateFile().WriteToFile(fileHandler, constants.StateFilename))
|
require.NoError(tc.stateFile.WriteToFile(fileHandler, constants.StateFilename))
|
||||||
|
|
||||||
v := &verifyCmd{
|
v := &verifyCmd{
|
||||||
fileHandler: fileHandler,
|
fileHandler: fileHandler,
|
||||||
|
@@ -11,9 +11,7 @@ go_library(
 	visibility = ["//cli:__subpackages__"],
 	deps = [
 		"//internal/cloud/cloudprovider",
-		"//internal/constants",
 		"//internal/file",
-		"//internal/semver",
 		"//internal/validation",
 		"@cat_dario_mergo//:mergo",
 		"@com_github_siderolabs_talos_pkg_machinery//config/encoder",
@@ -20,9 +20,7 @@ import (
 
 	"dario.cat/mergo"
 	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
-	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/file"
-	"github.com/edgelesssys/constellation/v2/internal/semver"
 	"github.com/edgelesssys/constellation/v2/internal/validation"
 )
 
@@ -557,24 +555,6 @@ func (s *State) Constraints() []*validation.Constraint {
 	return []*validation.Constraint{}
 }
 
-// Migrate migrates the state to the current version.
-// This is mostly done to pass the validation of the current version.
-// The infrastructure will be overwritten by the terraform outputs after the validation.
-func (s *State) Migrate() error {
-	// In v2.13.0 the ClusterEndpoint and InClusterEndpoint fields were added.
-	// So they are expected to be empty when upgrading to this version.
-	// TODO(3u13r): Remove on main after v2.13.0 is released.
-	if constants.BinaryVersion().MajorMinorEqual(semver.NewFromInt(2, 13, 0, "")) {
-		if s.Infrastructure.InClusterEndpoint == "" {
-			s.Infrastructure.InClusterEndpoint = s.Infrastructure.ClusterEndpoint
-		}
-		if s.Infrastructure.IPCidrNode == "" {
-			s.Infrastructure.IPCidrNode = "192.168.2.1/32"
-		}
-	}
-	return nil
-}
-
 // HexBytes is a byte slice that is marshalled to and from a hex string.
 type HexBytes []byte
 
@@ -90,7 +90,7 @@ Any changes to the image will inevitably also change the measured boot's PCR val
 To create a node attestation statement, the Constellation image obtains a CVM attestation statement from the hardware.
 This includes the runtime measurements and thereby binds the measured boot results to the CVM hardware measurement.
 
-In addition to the image measurements, Constellation extends a PCR during the [initialization phase](../workflows/create.md#the-apply-step) that irrevocably marks the node as initialized.
+In addition to the image measurements, Constellation extends a PCR during the [initialization phase](../workflows/create.md) that irrevocably marks the node as initialized.
 The measurement is created using the [*clusterID*](../architecture/keys.md#cluster-identity), tying all future attestation statements to this ID.
 Thereby, an attestation statement is unique for every cluster and a node can be identified unambiguously as being initialized.
 
@@ -14,6 +14,11 @@ The CLI stores state in the local filesystem making the current directory the ac
 Multiple clusters require multiple workspaces, hence, multiple directories.
 Note that every operation on a cluster always has to be performed from the directory associated with its workspace.
 
+You may copy files from the workspace to other locations,
+but you shouldn't move or delete them while the cluster is still being used.
+The Constellation CLI takes care of managing the workspace.
+Only when a cluster was terminated, and you are sure the files aren't needed anymore, should you remove a workspace.
+
 ## Cluster creation process
 
 To allow for fine-grained configuration of your cluster and cloud environment, Constellation supports an extensive configuration file with strong defaults. [Generating the configuration file](../workflows/config.md) is typically the first thing you do in the workspace.
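The added workspace guidance is easy to demonstrate. A minimal sketch, assuming the usual workspace file names that appear elsewhere in these docs (`constellation-admin.conf`, `constellation-terraform`):

```bash
# Copying a file out of the workspace is fine; the original stays in place.
cp constellation-admin.conf /tmp/admin-backup.conf

# Moving or deleting workspace files while the cluster exists breaks the CLI:
# mv constellation-terraform /tmp/   # don't do this on a live cluster
```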
@@ -32,11 +37,11 @@ In addition, the cluster's [identifier](orchestration.md#post-installation-confi
 
 ### Creation process details
 
-1. The CLI `create` command creates the confidential VM (CVM) resources in your cloud environment and configures the network
+1. The CLI `apply` command first creates the confidential VM (CVM) resources in your cloud environment and configures the network
 2. Each CVM boots the Constellation node image and measures every component in the boot chain
 3. The first microservice launched in each node is the [*Bootstrapper*](microservices.md#bootstrapper)
 4. The *Bootstrapper* waits until it either receives an initialization request or discovers an initialized cluster
-5. The CLI `apply` command connects to the *Bootstrapper* of a selected node, sends the configuration, and initiates the initialization of the cluster
+5. The CLI then connects to the *Bootstrapper* of a selected node, sends the configuration, and initiates the initialization of the cluster
 6. The *Bootstrapper* of **that** node [initializes the Kubernetes cluster](microservices.md#bootstrapper) and deploys the other Constellation [microservices](microservices.md) including the [*JoinService*](microservices.md#joinservice)
 7. Subsequently, the *Bootstrappers* of the other nodes discover the initialized cluster and send join requests to the *JoinService*
 8. As part of the join request each node includes an attestation statement of its boot measurements as authentication
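With steps 1 and 5 now driven by the same command, the operator-side flow reduces to a single invocation. A sketch, using the flags documented elsewhere in this commit:

```bash
# Creates the CVM resources, waits for the Bootstrapper, and initializes Kubernetes.
constellation apply -y
```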
@@ -100,29 +100,22 @@ attaching persistent storage, or autoscaling aren't available.
 
    This creates a [configuration file](../workflows/config.md) for QEMU called `constellation-conf.yaml`. After that, your current folder also becomes your [workspace](../architecture/orchestration.md#workspaces). All `constellation` commands for your cluster need to be executed from this directory.
 
-2. Now you can create your cluster and its nodes. `constellation create` uses the options set in `constellation-conf.yaml`.
+2. Now you can create your cluster and its nodes. `constellation apply` uses the options set in `constellation-conf.yaml`.
 
    ```bash
-   constellation create
+   constellation apply -y
    ```
 
    The Output should look like the following:
 
    ```shell-session
-   $ constellation create
-   Your Constellation cluster was created successfully.
-   ```
-
-3. Initialize the cluster
-
-   ```bash
-   constellation apply
-   ```
-
-   This should give the following output:
-
-   ```shell-session
-   $ constellation apply
+   $ constellation apply -y
+   Checking for infrastructure changes
+   The following Constellation cluster will be created:
+   3 control-plane nodes of type 2-vCPUs will be created.
+   1 worker node of type 2-vCPUs will be created.
+   Creating
+   Cloud infrastructure created successfully.
    Your Constellation master secret was successfully written to ./constellation-mastersecret.json
    Connecting
    Initializing cluster
@@ -146,7 +139,7 @@ attaching persistent storage, or autoscaling aren't available.
 
 :::
 
-4. Configure kubectl
+3. Configure kubectl
 
    ```bash
    export KUBECONFIG="$PWD/constellation-admin.conf"
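Once the kubeconfig is exported, the cluster behaves like any other Kubernetes cluster. A quick smoke test, assuming `kubectl` is installed locally:

```bash
export KUBECONFIG="$PWD/constellation-admin.conf"

# The control-plane and worker nodes from the apply output should be listed here.
kubectl get nodes
```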
@@ -118,7 +118,7 @@ If you encounter any problem with the following steps, make sure to use the [lat
 :::
 -->
 
-3. Create the cluster. `constellation create` uses options set in `constellation-conf.yaml`.
+3. Create the cluster. `constellation apply` uses options set in `constellation-conf.yaml`.
    If you want to manually manage your cloud resources, for example by using [Terraform](../reference/terraform.md), follow the corresponding instructions in the [Create workflow](../workflows/create.md).
 
    :::tip
@@ -128,26 +128,19 @@ If you encounter any problem with the following steps, make sure to use the [lat
    :::
 
    ```bash
-   constellation create -y
+   constellation apply -y
    ```
 
-   This should give the following output:
+   This should look similar to the following:
 
    ```shell-session
-   $ constellation create -y
-   Your Constellation cluster was created successfully.
-   ```
-
-4. Initialize the cluster.
-
-   ```bash
-   constellation apply
-   ```
-
-   This should give the following output:
-
-   ```shell-session
-   $ constellation apply
+   $ constellation apply -y
+   Checking for infrastructure changes
+   The following Constellation cluster will be created:
+   3 control-plane nodes of type n2d-standard-4 will be created.
+   1 worker node of type n2d-standard-4 will be created.
+   Creating
+   Cloud infrastructure created successfully
    Your Constellation master secret was successfully written to ./constellation-mastersecret.json
    Connecting
    Initializing cluster
@@ -171,7 +164,7 @@ If you encounter any problem with the following steps, make sure to use the [lat
 
 :::
 
-5. Configure kubectl.
+4. Configure kubectl.
 
    ```bash
    export KUBECONFIG="$PWD/constellation-admin.conf"
@@ -129,7 +129,7 @@ To [create the IAM configuration](../workflows/config.md#creating-an-iam-configu
 
 The built-in `Owner` role is a superset of these permissions.
 
-To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions:
+To [create a Constellation cluster](../workflows/create.md), you need the following permissions:
 * `Microsoft.Attestation/attestationProviders/*` \[2]
 * `Microsoft.Compute/virtualMachineScaleSets/*`
 * `Microsoft.Insights/components/*`
@@ -168,7 +168,7 @@ To [create the IAM configuration](../workflows/config.md#creating-an-iam-configu
 
 Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions.
 
-To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions:
+To [create a Constellation cluster](../workflows/create.md), you need the following permissions:
 * `compute.addresses.createInternal`
 * `compute.addresses.deleteInternal`
 * `compute.addresses.get`
@@ -279,7 +279,7 @@ To [create the IAM configuration](../workflows/config.md#creating-an-iam-configu
 
 The built-in `AdministratorAccess` policy is a superset of these permissions.
 
-To [create a Constellation cluster](../workflows/create.md#the-create-step), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf).
+To [create a Constellation cluster](../workflows/create.md), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf).
 
 
 The built-in `PowerUserAccess` policy is a superset of these permissions.
@@ -18,7 +18,7 @@ Currently, these subdirectories are:
 * `constellation-terraform` - Terraform state files for the resources of the Constellation cluster
 * `constellation-iam-terraform` - Terraform state files for IAM configuration
 
-As with all commands, commands that work with these files (e.g., `create`, `terminate`, `iam`) have to be executed from the root of the cluster's [workspace directory](../architecture/orchestration.md#workspaces). You usually don't need and shouldn't manipulate or delete the subdirectories manually.
+As with all commands, commands that work with these files (e.g., `apply`, `terminate`, `iam`) have to be executed from the root of the cluster's [workspace directory](../architecture/orchestration.md#workspaces). You usually don't need and shouldn't manipulate or delete the subdirectories manually.
 
 ## Interacting with Terraform manually
 
@@ -27,11 +27,11 @@ Manual interaction with Terraform state created by Constellation (i.e., via the
 ## Terraform debugging
 
 To debug Terraform issues, the Constellation CLI offers the `tf-log` flag. You can set it to any of [Terraform's log levels](https://developer.hashicorp.com/terraform/internals/debugging):
-- `JSON` (JSON-formatted logs at `TRACE` level)
-- `TRACE`
-- `DEBUG`
-- `INFO`
-- `WARN`
-- `ERROR`
+* `JSON` (JSON-formatted logs at `TRACE` level)
+* `TRACE`
+* `DEBUG`
+* `INFO`
+* `WARN`
+* `ERROR`
 
 The log output is written to the `terraform.log` file in the workspace directory. The output is appended to the file on each run.
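The `--tf-log` flag also appears in the CI workflow earlier in this commit. A sketch of a local debugging session using it:

```bash
# Enable Terraform debug logs; output is appended to ./terraform.log.
constellation apply --tf-log=DEBUG

# Follow the log from the workspace directory.
tail -f terraform.log
```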
@@ -8,10 +8,16 @@ This recording presents the essence of this page. It's recommended to read it in
 
 ---
 
-Creating your cluster requires two steps:
+Creating your cluster happens through multiple phases.
+The most significant ones are:
 
 1. Creating the necessary resources in your cloud environment
 2. Bootstrapping the Constellation cluster and setting up a connection
+3. Installing the necessary Kubernetes components
+
+`constellation apply` handles all this in a single command.
+You can use the `--skip-phases` flag to skip specific phases of the process.
+For example, if you created the infrastructure manually, you can skip the cloud resource creation phase.
 
 See the [architecture](../architecture/orchestration.md) section for details on the inner workings of this process.
 
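The `infrastructure` phase name is used later in this diff for self-managed setups; a minimal sketch of both modes:

```bash
# Default: run all phases (infrastructure, bootstrapping, Kubernetes components).
constellation apply

# Infrastructure was created manually, e.g., with Terraform: skip that phase.
constellation apply --skip-phases=infrastructure
```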
@@ -19,21 +25,16 @@ See the [architecture](../architecture/orchestration.md) section for details on
 If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing.
 :::
 
-## The *create* step
-
-This step creates the necessary resources for your cluster in your cloud environment.
 Before you create the cluster, make sure to have a [valid configuration file](./config.md).
 
-### Create
-
 <tabs groupId="usage">
 <tabItem value="cli" label="CLI">
 
 ```bash
-constellation create
+constellation apply
 ```
 
-*create* stores your cluster's state in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace.
+`apply` stores the state of your cluster's cloud resources in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace.
 
 </tabItem>
 <tabItem value="self-managed" label="Self-managed">
@@ -70,20 +71,16 @@ Make sure all necessary resources are created, e.g., through checking your CSP's
 
 Fill these outputs into the corresponding fields of the `Infrastructure` block inside the `constellation-state.yaml` file. For example, fill the IP or DNS name your cluster can be reached at into the `.Infrastructure.ClusterEndpoint` field.
 
-Continue with [initializing your cluster](#the-apply-step).
+With the required cloud resources set up, continue with initializing your cluster.
 
+```bash
+constellation apply --skip-phases=infrastructure
+```
+
 </tabItem>
 </tabs>
 
-## The *apply* step
-
-The following command initializes and bootstraps your cluster:
-
-```bash
-constellation apply
-```
-
-Next, configure `kubectl` for your cluster:
+Finally, configure `kubectl` for your cluster:
 
 ```bash
 export KUBECONFIG="$PWD/constellation-admin.conf"
@@ -1,8 +1,7 @@
 # Expose a service
 
 Constellation integrates the native load balancers of each CSP. Therefore, to expose a service simply [create a service of type `LoadBalancer`](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer).
 
-
 ## Internet-facing LB service on AWS
 
 To expose your application service externally you might want to use a Kubernetes Service of type `LoadBalancer`. On AWS, load-balancing is achieved through the [AWS Load Balancing Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller) as in the managed EKS.
@@ -31,7 +31,7 @@ constellation terminate --yes
 ```
 
 This deletes all resources created by Constellation in your cloud environment.
-All local files created by the `create` and `apply` commands are deleted as well, except for `constellation-mastersecret.json` and the configuration file.
+All local files created by the `apply` command are deleted as well, except for `constellation-mastersecret.json` and the configuration file.
 
 :::caution
 
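Because `constellation-mastersecret.json` and the configuration file survive termination, a cautious teardown might look like this (the backup destination is an assumption):

```bash
# Optional: keep the surviving files somewhere safe (path is hypothetical).
mkdir -p ~/constellation-backup
cp constellation-mastersecret.json constellation-conf.yaml ~/constellation-backup/

constellation terminate --yes
```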
@@ -11,7 +11,7 @@ If something doesn't work, check out the [known issues](https://github.com/edgel
 
 ### Azure: Resource Providers can't be registered
 
-On Azure, you may receive the following error when running `create` or `terminate` with limited IAM permissions:
+On Azure, you may receive the following error when running `apply` or `terminate` with limited IAM permissions:
 
 ```shell-session
 Error: Error ensuring Resource Providers are registered.
@@ -27,11 +27,11 @@ If you don't have permission to register Resource Providers you may wish to use
 
 To continue, please ensure that the [required resource providers](../getting-started/install.md#required-permissions) have been registered in your subscription by your administrator.
 
-Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `create` or `terminate` again.
+Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `apply` or `terminate` again.
 For example:
 
 ```bash
-ARM_SKIP_PROVIDER_REGISTRATION=true constellation create
+ARM_SKIP_PROVIDER_REGISTRATION=true constellation apply
 ```
 
 Or alternatively, for `terminate`:
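The `terminate` variant is cut off by this hunk; by analogy with the `apply` example above it would presumably be:

```bash
# Hypothetical continuation of the doc; mirrors the apply example above.
ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate
```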
@@ -26,6 +26,7 @@ AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_I
 ```
 
 The script creates the following resources:
+
 1. A new image gallery with the default name `constellation-import`
 2. A new image definition with the default name `constellation`
 3. The actual image with the provided version. In this case `2.2.0`
@@ -42,7 +43,7 @@ constellation config fetch-measurements -u$URL -s$URL.sig
 
 :::info
 
-The [constellation create](create.md) command will issue a warning because manually imported images aren't recognized as production grade images:
+The [`constellation apply`](create.md) command will issue a warning because manually imported images aren't recognized as production grade images:
 
 ```shell-session
 Configured image doesn't look like a released production image. Double check image before deploying to production.
Loading…
x
Reference in New Issue
Block a user