Mirror of https://github.com/edgelesssys/constellation.git (synced 2024-12-24 06:59:40 -05:00)
cli: add --workspace flag to set base directory for Constellation workspace (#2148)
* Remove `--config` and `--master-secret` flags
* Add `--workspace` flag
* In CLI, only work on files with paths created from `cli/internal/cmd`
* Properly print values for GCP on IAM create when not directly updating the config

---------

Signed-off-by: Daniel Weiße <dw@edgeless.systems>
parent ec33530c38
commit d1ace13713
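Note: the diff below calls path helpers such as configPath(), adminConfPath(), and terraformLogPath() from the newly added cli/internal/cmd/workspace.go, which is not part of this excerpt. The following is only a minimal sketch of how such helpers could look, assuming they simply join the workspace base directory with the well-known file names; it is not the committed implementation.

package cmd

import (
    "path/filepath"

    "github.com/edgelesssys/constellation/v2/internal/constants"
)

// configPath returns the config file location inside the given workspace directory.
// An empty workspace resolves to the current working directory.
func configPath(workspace string) string {
    return filepath.Join(workspace, constants.ConfigFilename)
}

// adminConfPath returns the admin config file location inside the given workspace directory.
func adminConfPath(workspace string) string {
    return filepath.Join(workspace, constants.AdminConfFilename)
}

With helpers like these, an invocation such as `constellation create -C ./my-cluster` (using the new persistent --workspace / -C flag) keeps all generated files under ./my-cluster, since preRunRoot also changes into that directory before the subcommand runs.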
@@ -7,7 +7,6 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//cli/internal/cmd",
-        "//internal/constants",
         "@com_github_spf13_cobra//:cobra",
     ],
 )
@@ -18,7 +18,6 @@ import (
     "os/signal"
 
     "github.com/edgelesssys/constellation/v2/cli/internal/cmd"
-    "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/spf13/cobra"
 )
 
@@ -34,22 +33,22 @@ func Execute() error {
 // NewRootCmd creates the root command.
 func NewRootCmd() *cobra.Command {
     rootCmd := &cobra.Command{
         Use:   "constellation",
         Short: "Manage your Constellation cluster",
         Long:  "Manage your Constellation cluster.",
-        PersistentPreRun: preRunRoot,
+        PersistentPreRunE: preRunRoot,
     }
 
     // Set output of cmd.Print to stdout. (By default, it's stderr.)
     rootCmd.SetOut(os.Stdout)
 
-    rootCmd.PersistentFlags().String("config", constants.ConfigFilename, "path to the configuration file")
-    must(rootCmd.MarkPersistentFlagFilename("config", "yaml"))
+    rootCmd.PersistentFlags().StringP("workspace", "C", "", "path to the Constellation workspace")
 
     rootCmd.PersistentFlags().Bool("debug", false, "enable debug logging")
     rootCmd.PersistentFlags().Bool("force", false, "disable version compatibility checks - might result in corrupted clusters")
     rootCmd.PersistentFlags().String("tf-log", "NONE", "Terraform log level")
 
+    must(rootCmd.MarkPersistentFlagDirname("workspace"))
 
     rootCmd.AddCommand(cmd.NewConfigCmd())
     rootCmd.AddCommand(cmd.NewCreateCmd())
     rootCmd.AddCommand(cmd.NewInitCmd())
@@ -92,8 +91,22 @@ func signalContext(ctx context.Context, sig os.Signal) (context.Context, context
     return sigCtx, cancelFunc
 }
 
-func preRunRoot(cmd *cobra.Command, _ []string) {
+func preRunRoot(cmd *cobra.Command, _ []string) error {
     cmd.SilenceUsage = true
+
+    workspace, err := cmd.Flags().GetString("workspace")
+    if err != nil {
+        return fmt.Errorf("getting workspace flag: %w", err)
+    }
+
+    // Change to workspace directory if set.
+    if workspace != "" {
+        if err := os.Chdir(workspace); err != nil {
+            return fmt.Errorf("changing from current directory to workspace %q: %w", workspace, err)
+        }
+    }
+
+    return nil
 }
 
 func must(err error) {
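Illustrative note (not part of the commit): since preRunRoot above now returns an error and changes into the workspace directory, a test along the following lines could exercise that behavior. Everything besides NewRootCmd, preRunRoot, and the "workspace" flag name is assumed for the example, including the package name and the test layout.

package cmd

import (
    "os"
    "path/filepath"
    "testing"
)

// TestPreRunRootChangesDirectory is a sketch only: it checks that preRunRoot
// chdirs into the directory passed via the --workspace flag.
func TestPreRunRootChangesDirectory(t *testing.T) {
    orig, err := os.Getwd()
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() { _ = os.Chdir(orig) })

    workspace := t.TempDir()
    cmd := NewRootCmd()
    if err := cmd.PersistentFlags().Set("workspace", workspace); err != nil {
        t.Fatal(err)
    }

    if err := preRunRoot(cmd, nil); err != nil {
        t.Fatal(err)
    }

    // A relative write after preRunRoot must land inside the workspace.
    if err := os.WriteFile("testfile", []byte("x"), 0o644); err != nil {
        t.Fatal(err)
    }
    if _, err := os.Stat(filepath.Join(workspace, "testfile")); err != nil {
        t.Errorf("expected file inside workspace: %v", err)
    }
}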
@@ -28,7 +28,6 @@ go_library(
         "//internal/cloud/cloudprovider",
         "//internal/cloud/gcpshared",
         "//internal/config",
-        "//internal/constants",
         "//internal/imagefetcher",
         "//internal/role",
         "@com_github_azure_azure_sdk_for_go//profiles/latest/attestation/attestation",
@@ -23,7 +23,6 @@ import (
     "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
     "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
     "github.com/edgelesssys/constellation/v2/internal/config"
-    "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/edgelesssys/constellation/v2/internal/imagefetcher"
 )
 
@@ -31,7 +30,7 @@ import (
 type Creator struct {
     out io.Writer
     image imageFetcher
-    newTerraformClient func(ctx context.Context) (tfResourceClient, error)
+    newTerraformClient func(ctx context.Context, workspace string) (tfResourceClient, error)
     newLibvirtRunner func() libvirtRunner
     newRawDownloader func() rawDownloader
     policyPatcher policyPatcher
@@ -42,8 +41,8 @@ func NewCreator(out io.Writer) *Creator {
     return &Creator{
         out: out,
         image: imagefetcher.New(),
-        newTerraformClient: func(ctx context.Context) (tfResourceClient, error) {
-            return terraform.New(ctx, constants.TerraformWorkingDir)
+        newTerraformClient: func(ctx context.Context, workspace string) (tfResourceClient, error) {
+            return terraform.New(ctx, workspace)
         },
         newLibvirtRunner: func() libvirtRunner {
             return libvirt.New()
@@ -57,10 +56,11 @@ func NewCreator(out io.Writer) *Creator {
 
 // CreateOptions are the options for creating a Constellation cluster.
 type CreateOptions struct {
     Provider cloudprovider.Provider
     Config *config.Config
-    image string
-    TFLogLevel terraform.LogLevel
+    TFWorkspace string
+    image string
+    TFLogLevel terraform.LogLevel
 }
 
 // Create creates the handed amount of instances and all the needed resources.
@@ -74,7 +74,7 @@ func (c *Creator) Create(ctx context.Context, opts CreateOptions) (clusterid.Fil
     }
     opts.image = image
 
-    cl, err := c.newTerraformClient(ctx)
+    cl, err := c.newTerraformClient(ctx, opts.TFWorkspace)
     if err != nil {
         return clusterid.File{}, err
     }
@@ -203,7 +203,7 @@ func TestCreator(t *testing.T) {
             image: &stubImageFetcher{
                 reference: "some-image",
             },
-            newTerraformClient: func(ctx context.Context) (tfResourceClient, error) {
+            newTerraformClient: func(_ context.Context, _ string) (tfResourceClient, error) {
                 return tc.tfClient, tc.newTfClientErr
             },
             newLibvirtRunner: func() libvirtRunner {
@@ -19,26 +19,26 @@ import (
     "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
     "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
     "github.com/edgelesssys/constellation/v2/internal/cloud/gcpshared"
-    "github.com/edgelesssys/constellation/v2/internal/constants"
 )
 
 // IAMDestroyer destroys an IAM configuration.
 type IAMDestroyer struct {
-    client tfIAMClient
+    newTerraformClient newTFIAMClientFunc
 }
 
 // NewIAMDestroyer creates a new IAM Destroyer.
-func NewIAMDestroyer(ctx context.Context) (*IAMDestroyer, error) {
-    cl, err := terraform.New(ctx, constants.TerraformIAMWorkingDir)
-    if err != nil {
-        return nil, err
-    }
-    return &IAMDestroyer{client: cl}, nil
+func NewIAMDestroyer() *IAMDestroyer {
+    return &IAMDestroyer{newTerraformClient: newTerraformIAMClient}
 }
 
-// GetTfstateServiceAccountKey returns the sa_key output from the terraform state.
-func (d *IAMDestroyer) GetTfstateServiceAccountKey(ctx context.Context) (gcpshared.ServiceAccountKey, error) {
-    tfState, err := d.client.ShowIAM(ctx, cloudprovider.GCP)
+// GetTfStateServiceAccountKey returns the sa_key output from the terraform state.
+func (d *IAMDestroyer) GetTfStateServiceAccountKey(ctx context.Context, tfWorkspace string) (gcpshared.ServiceAccountKey, error) {
+    client, err := d.newTerraformClient(ctx, tfWorkspace)
+    if err != nil {
+        return gcpshared.ServiceAccountKey{}, err
+    }
+
+    tfState, err := client.ShowIAM(ctx, cloudprovider.GCP)
     if err != nil {
         return gcpshared.ServiceAccountKey{}, fmt.Errorf("getting terraform state: %w", err)
     }
@@ -58,25 +58,31 @@ func (d *IAMDestroyer) GetTfstateServiceAccountKey(ctx context.Context) (gcpshar
 }
 
 // DestroyIAMConfiguration destroys the previously created IAM configuration and deletes the local IAM terraform files.
-func (d *IAMDestroyer) DestroyIAMConfiguration(ctx context.Context, logLevel terraform.LogLevel) error {
-    if err := d.client.Destroy(ctx, logLevel); err != nil {
+func (d *IAMDestroyer) DestroyIAMConfiguration(ctx context.Context, tfWorkspace string, logLevel terraform.LogLevel) error {
+    client, err := d.newTerraformClient(ctx, tfWorkspace)
+    if err != nil {
         return err
     }
-    return d.client.CleanUpWorkspace()
+    if err := client.Destroy(ctx, logLevel); err != nil {
+        return err
+    }
+    return client.CleanUpWorkspace()
 }
 
 // IAMCreator creates the IAM configuration on the cloud provider.
 type IAMCreator struct {
     out io.Writer
-    newTerraformClient func(ctx context.Context) (tfIAMClient, error)
+    newTerraformClient newTFIAMClientFunc
 }
 
 // IAMConfigOptions holds the necessary values for IAM configuration.
 type IAMConfigOptions struct {
     GCP GCPIAMConfig
     Azure AzureIAMConfig
     AWS AWSIAMConfig
     TFLogLevel terraform.LogLevel
+    TFWorkspace string
 }
 
 // GCPIAMConfig holds the necessary values for GCP IAM configuration.
@@ -103,36 +109,25 @@ type AWSIAMConfig struct {
 // NewIAMCreator creates a new IAM creator.
 func NewIAMCreator(out io.Writer) *IAMCreator {
     return &IAMCreator{
         out: out,
-        newTerraformClient: func(ctx context.Context) (tfIAMClient, error) {
-            return terraform.New(ctx, constants.TerraformIAMWorkingDir)
-        },
+        newTerraformClient: newTerraformIAMClient,
     }
 }
 
 // Create prepares and hands over the corresponding providers IAM creator.
 func (c *IAMCreator) Create(ctx context.Context, provider cloudprovider.Provider, opts *IAMConfigOptions) (iamid.File, error) {
+    cl, err := c.newTerraformClient(ctx, opts.TFWorkspace)
+    if err != nil {
+        return iamid.File{}, err
+    }
+    defer cl.RemoveInstaller()
+
     switch provider {
     case cloudprovider.GCP:
-        cl, err := c.newTerraformClient(ctx)
-        if err != nil {
-            return iamid.File{}, err
-        }
-        defer cl.RemoveInstaller()
         return c.createGCP(ctx, cl, opts)
     case cloudprovider.Azure:
-        cl, err := c.newTerraformClient(ctx)
-        if err != nil {
-            return iamid.File{}, err
-        }
-        defer cl.RemoveInstaller()
         return c.createAzure(ctx, cl, opts)
     case cloudprovider.AWS:
-        cl, err := c.newTerraformClient(ctx)
-        if err != nil {
-            return iamid.File{}, err
-        }
-        defer cl.RemoveInstaller()
         return c.createAWS(ctx, cl, opts)
     default:
         return iamid.File{}, fmt.Errorf("unsupported cloud provider: %s", provider)
@@ -222,3 +217,9 @@ func (c *IAMCreator) createAWS(ctx context.Context, cl tfIAMClient, opts *IAMCon
         },
     }, nil
 }
+
+type newTFIAMClientFunc func(ctx context.Context, workspace string) (tfIAMClient, error)
+
+func newTerraformIAMClient(ctx context.Context, workspace string) (tfIAMClient, error) {
+    return terraform.New(ctx, workspace)
+}
@@ -22,8 +22,6 @@ import (
 )
 
 func TestIAMCreator(t *testing.T) {
-    someErr := errors.New("failed")
-
     validGCPIAMConfig := GCPIAMConfig{
         Region: "europe-west1",
         Zone: "europe-west1-a",
@@ -91,30 +89,32 @@ func TestIAMCreator(t *testing.T) {
     }{
         "new terraform client err": {
             tfClient: &stubTerraformClient{},
-            newTfClientErr: someErr,
+            newTfClientErr: assert.AnError,
             wantErr: true,
+            config: &IAMConfigOptions{TFWorkspace: "test"},
         },
         "create iam config err": {
-            tfClient: &stubTerraformClient{iamOutputErr: someErr},
+            tfClient: &stubTerraformClient{iamOutputErr: assert.AnError},
             wantErr: true,
+            config: &IAMConfigOptions{TFWorkspace: "test"},
         },
         "gcp": {
             tfClient: &stubTerraformClient{iamOutput: validGCPIAMOutput},
             wantIAMIDFile: validGCPIAMIDFile,
             provider: cloudprovider.GCP,
-            config: &IAMConfigOptions{GCP: validGCPIAMConfig},
+            config: &IAMConfigOptions{GCP: validGCPIAMConfig, TFWorkspace: "test"},
         },
         "azure": {
             tfClient: &stubTerraformClient{iamOutput: validAzureIAMOutput},
             wantIAMIDFile: validAzureIAMIDFile,
             provider: cloudprovider.Azure,
-            config: &IAMConfigOptions{Azure: validAzureIAMConfig},
+            config: &IAMConfigOptions{Azure: validAzureIAMConfig, TFWorkspace: "test"},
         },
         "aws": {
             tfClient: &stubTerraformClient{iamOutput: validAWSIAMOutput},
             wantIAMIDFile: validAWSIAMIDFile,
             provider: cloudprovider.AWS,
-            config: &IAMConfigOptions{AWS: validAWSIAMConfig},
+            config: &IAMConfigOptions{AWS: validAWSIAMConfig, TFWorkspace: "test"},
         },
     }
 
@@ -124,7 +124,7 @@ func TestIAMCreator(t *testing.T) {
 
             creator := &IAMCreator{
                 out: &bytes.Buffer{},
-                newTerraformClient: func(ctx context.Context) (tfIAMClient, error) {
+                newTerraformClient: func(_ context.Context, _ string) (tfIAMClient, error) {
                     return tc.tfClient, tc.newTfClientErr
                 },
             }
@@ -181,9 +181,11 @@ func TestDestroyIAMConfiguration(t *testing.T) {
     for name, tc := range testCases {
         t.Run(name, func(t *testing.T) {
             assert := assert.New(t)
-            destroyer := &IAMDestroyer{client: tc.tfClient}
+            destroyer := &IAMDestroyer{newTerraformClient: func(_ context.Context, _ string) (tfIAMClient, error) {
+                return tc.tfClient, nil
+            }}
 
-            err := destroyer.DestroyIAMConfiguration(context.Background(), terraform.LogLevelNone)
+            err := destroyer.DestroyIAMConfiguration(context.Background(), "", terraform.LogLevelNone)
 
             if tc.wantErr {
                 assert.Error(err)
@@ -198,8 +200,6 @@ func TestDestroyIAMConfiguration(t *testing.T) {
 }
 
 func TestGetTfstateServiceAccountKey(t *testing.T) {
-    someError := errors.New("failed")
-
     gcpFile := `
 {
     "auth_provider_x509_cert_url": "",
@@ -235,7 +235,7 @@ func TestGetTfstateServiceAccountKey(t *testing.T) {
         },
         "show error": {
             cl: &stubTerraformClient{
-                showErr: someError,
+                showErr: assert.AnError,
             },
             wantErr: true,
             wantShowCalled: true,
@@ -275,9 +275,11 @@ func TestGetTfstateServiceAccountKey(t *testing.T) {
         t.Run(name, func(t *testing.T) {
             assert := assert.New(t)
 
-            destroyer := IAMDestroyer{client: tc.cl}
+            destroyer := IAMDestroyer{newTerraformClient: func(_ context.Context, _ string) (tfIAMClient, error) {
+                return tc.cl, nil
+            }}
 
-            saKey, err := destroyer.GetTfstateServiceAccountKey(context.Background())
+            saKey, err := destroyer.GetTfStateServiceAccountKey(context.Background(), "")
 
             if tc.wantErr {
                 assert.Error(err)
@@ -11,20 +11,19 @@ import (
 
     "github.com/edgelesssys/constellation/v2/cli/internal/libvirt"
     "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
-    "github.com/edgelesssys/constellation/v2/internal/constants"
 )
 
 // Terminator deletes cloud provider resources.
 type Terminator struct {
-    newTerraformClient func(ctx context.Context) (tfResourceClient, error)
+    newTerraformClient func(ctx context.Context, tfWorkspace string) (tfResourceClient, error)
     newLibvirtRunner func() libvirtRunner
 }
 
 // NewTerminator create a new cloud terminator.
 func NewTerminator() *Terminator {
     return &Terminator{
-        newTerraformClient: func(ctx context.Context) (tfResourceClient, error) {
-            return terraform.New(ctx, constants.TerraformWorkingDir)
+        newTerraformClient: func(ctx context.Context, tfWorkspace string) (tfResourceClient, error) {
+            return terraform.New(ctx, tfWorkspace)
         },
         newLibvirtRunner: func() libvirtRunner {
             return libvirt.New()
@@ -33,14 +32,14 @@ func NewTerminator() *Terminator {
 }
 
 // Terminate deletes the could provider resources.
-func (t *Terminator) Terminate(ctx context.Context, logLevel terraform.LogLevel) (retErr error) {
+func (t *Terminator) Terminate(ctx context.Context, tfWorkspace string, logLevel terraform.LogLevel) (retErr error) {
     defer func() {
         if retErr == nil {
             retErr = t.newLibvirtRunner().Stop(ctx)
         }
     }()
 
-    cl, err := t.newTerraformClient(ctx)
+    cl, err := t.newTerraformClient(ctx, tfWorkspace)
     if err != nil {
         return err
     }
@@ -55,7 +55,7 @@ func TestTerminator(t *testing.T) {
             assert := assert.New(t)
 
             terminator := &Terminator{
-                newTerraformClient: func(ctx context.Context) (tfResourceClient, error) {
+                newTerraformClient: func(_ context.Context, _ string) (tfResourceClient, error) {
                     return tc.tfClient, tc.newTfClientErr
                 },
                 newLibvirtRunner: func() libvirtRunner {
@@ -63,7 +63,7 @@ func TestTerminator(t *testing.T) {
                 },
             }
 
-            err := terminator.Terminate(context.Background(), terraform.LogLevelNone)
+            err := terminator.Terminate(context.Background(), "", terraform.LogLevelNone)
 
             if tc.wantErr {
                 assert.Error(err)
@@ -36,6 +36,7 @@ go_library(
         "validargs.go",
         "verify.go",
         "version.go",
+        "workspace.go",
     ],
     importpath = "github.com/edgelesssys/constellation/v2/cli/internal/cmd",
     visibility = ["//cli:__subpackages__"],
@@ -169,7 +170,6 @@ go_test(
         "@com_github_spf13_cobra//:cobra",
        "@com_github_stretchr_testify//assert",
        "@com_github_stretchr_testify//require",
-        "@in_gopkg_yaml_v3//:yaml_v3",
        "@io_k8s_api//core/v1:core",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
        "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured",
@@ -33,10 +33,10 @@ type cloudIAMCreator interface {
 }
 
 type iamDestroyer interface {
-    DestroyIAMConfiguration(ctx context.Context, logLevel terraform.LogLevel) error
-    GetTfstateServiceAccountKey(ctx context.Context) (gcpshared.ServiceAccountKey, error)
+    DestroyIAMConfiguration(ctx context.Context, tfWorkspace string, logLevel terraform.LogLevel) error
+    GetTfStateServiceAccountKey(ctx context.Context, tfWorkspace string) (gcpshared.ServiceAccountKey, error)
 }
 
 type cloudTerminator interface {
-    Terminate(ctx context.Context, logLevel terraform.LogLevel) error
+    Terminate(ctx context.Context, workspace string, logLevel terraform.LogLevel) error
 }
@@ -46,7 +46,7 @@ type stubCloudTerminator struct {
     terminateErr error
 }
 
-func (c *stubCloudTerminator) Terminate(_ context.Context, _ terraform.LogLevel) error {
+func (c *stubCloudTerminator) Terminate(_ context.Context, _ string, _ terraform.LogLevel) error {
     c.called = true
     return c.terminateErr
 }
@@ -73,18 +73,18 @@ func (c *stubIAMCreator) Create(
 
 type stubIAMDestroyer struct {
     destroyCalled bool
-    getTfstateKeyCalled bool
+    getTfStateKeyCalled bool
     gcpSaKey gcpshared.ServiceAccountKey
     destroyErr error
-    getTfstateKeyErr error
+    getTfStateKeyErr error
 }
 
-func (d *stubIAMDestroyer) DestroyIAMConfiguration(_ context.Context, _ terraform.LogLevel) error {
+func (d *stubIAMDestroyer) DestroyIAMConfiguration(_ context.Context, _ string, _ terraform.LogLevel) error {
     d.destroyCalled = true
     return d.destroyErr
 }
 
-func (d *stubIAMDestroyer) GetTfstateServiceAccountKey(_ context.Context) (gcpshared.ServiceAccountKey, error) {
-    d.getTfstateKeyCalled = true
-    return d.gcpSaKey, d.getTfstateKeyErr
+func (d *stubIAMDestroyer) GetTfStateServiceAccountKey(_ context.Context, _ string) (gcpshared.ServiceAccountKey, error) {
+    d.getTfStateKeyCalled = true
+    return d.gcpSaKey, d.getTfStateKeyErr
 }
@@ -12,6 +12,7 @@ import (
     "fmt"
     "net/http"
     "net/url"
+    "path/filepath"
     "time"
 
     "github.com/edgelesssys/constellation/v2/cli/internal/featureset"
@@ -19,6 +20,7 @@ import (
     "github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
     "github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
     "github.com/edgelesssys/constellation/v2/internal/config"
+    "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/edgelesssys/constellation/v2/internal/file"
     "github.com/edgelesssys/constellation/v2/internal/sigstore"
     "github.com/edgelesssys/constellation/v2/internal/sigstore/keyselect"
@@ -47,8 +49,8 @@ type fetchMeasurementsFlags struct {
     measurementsURL *url.URL
     signatureURL *url.URL
     insecure bool
-    configPath string
     force bool
+    workspace string
 }
 
 type configFetchMeasurementsCmd struct {
@@ -88,9 +90,9 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements(
         return errors.New("fetching measurements is not supported")
     }
 
-    cfm.log.Debugf("Loading configuration file from %q", flags.configPath)
+    cfm.log.Debugf("Loading configuration file from %q", filepath.Join(flags.workspace, constants.ConfigFilename))
 
-    conf, err := config.New(fileHandler, flags.configPath, fetcher, flags.force)
+    conf, err := config.New(fileHandler, constants.ConfigFilename, fetcher, flags.force)
     var configValidationErr *config.ValidationError
     if errors.As(err, &configValidationErr) {
         cmd.PrintErrln(configValidationErr.LongMessage())
@@ -168,10 +170,10 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements(
 
     cfm.log.Debugf("Updating measurements in configuration")
     conf.UpdateMeasurements(fetchedMeasurements)
-    if err := fileHandler.WriteYAML(flags.configPath, conf, file.OptOverwrite); err != nil {
+    if err := fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil {
         return err
     }
-    cfm.log.Debugf("Configuration written to %s", flags.configPath)
+    cfm.log.Debugf("Configuration written to %s", configPath(flags.workspace))
     cmd.Print("Successfully fetched measurements and updated Configuration\n")
     return nil
 }
@@ -192,41 +194,39 @@ func (cfm *configFetchMeasurementsCmd) parseURLFlag(cmd *cobra.Command, flag str
 }
 
 func (cfm *configFetchMeasurementsCmd) parseFetchMeasurementsFlags(cmd *cobra.Command) (*fetchMeasurementsFlags, error) {
+    workspace, err := cmd.Flags().GetString("workspace")
+    if err != nil {
+        return nil, fmt.Errorf("parsing workspace argument: %w", err)
+    }
     measurementsURL, err := cfm.parseURLFlag(cmd, "url")
     if err != nil {
-        return &fetchMeasurementsFlags{}, err
+        return nil, err
     }
     cfm.log.Debugf("Parsed measurements URL as %v", measurementsURL)
 
     measurementsSignatureURL, err := cfm.parseURLFlag(cmd, "signature-url")
     if err != nil {
-        return &fetchMeasurementsFlags{}, err
+        return nil, err
     }
     cfm.log.Debugf("Parsed measurements signature URL as %v", measurementsSignatureURL)
 
     insecure, err := cmd.Flags().GetBool("insecure")
     if err != nil {
-        return &fetchMeasurementsFlags{}, fmt.Errorf("parsing insecure argument: %w", err)
+        return nil, fmt.Errorf("parsing insecure argument: %w", err)
     }
     cfm.log.Debugf("Insecure flag is %v", insecure)
 
-    config, err := cmd.Flags().GetString("config")
-    if err != nil {
-        return &fetchMeasurementsFlags{}, fmt.Errorf("parsing config path argument: %w", err)
-    }
-    cfm.log.Debugf("Configuration path is %q", config)
 
     force, err := cmd.Flags().GetBool("force")
     if err != nil {
-        return &fetchMeasurementsFlags{}, fmt.Errorf("parsing force argument: %w", err)
+        return nil, fmt.Errorf("parsing force argument: %w", err)
     }
 
     return &fetchMeasurementsFlags{
         measurementsURL: measurementsURL,
         signatureURL: measurementsSignatureURL,
         insecure: insecure,
-        configPath: config,
         force: force,
+        workspace: workspace,
     }, nil
 }
 
@@ -39,7 +39,6 @@ func TestParseFetchMeasurementsFlags(t *testing.T) {
     testCases := map[string]struct {
         urlFlag string
         signatureURLFlag string
-        configFlag string
         forceFlag bool
         wantFlags *fetchMeasurementsFlags
         wantErr bool
@@ -48,7 +47,6 @@ func TestParseFetchMeasurementsFlags(t *testing.T) {
             wantFlags: &fetchMeasurementsFlags{
                 measurementsURL: nil,
                 signatureURL: nil,
-                configPath: constants.ConfigFilename,
             },
         },
         "url": {
@@ -57,19 +55,12 @@ func TestParseFetchMeasurementsFlags(t *testing.T) {
             wantFlags: &fetchMeasurementsFlags{
                 measurementsURL: urlMustParse("https://some.other.url/with/path"),
                 signatureURL: urlMustParse("https://some.other.url/with/path.sig"),
-                configPath: constants.ConfigFilename,
             },
         },
         "broken url": {
             urlFlag: "%notaurl%",
             wantErr: true,
         },
-        "config": {
-            configFlag: "someOtherConfig.yaml",
-            wantFlags: &fetchMeasurementsFlags{
-                configPath: "someOtherConfig.yaml",
-            },
-        },
     }
 
     for name, tc := range testCases {
@@ -78,8 +69,8 @@ func TestParseFetchMeasurementsFlags(t *testing.T) {
             require := require.New(t)
 
             cmd := newConfigFetchMeasurementsCmd()
-            cmd.Flags().String("config", constants.ConfigFilename, "") // register persistent flag manually
+            cmd.Flags().String("workspace", "", "") // register persistent flag manually
             cmd.Flags().Bool("force", false, "") // register persistent flag manually
 
             if tc.urlFlag != "" {
                 require.NoError(cmd.Flags().Set("url", tc.urlFlag))
@@ -87,9 +78,6 @@ func TestParseFetchMeasurementsFlags(t *testing.T) {
             if tc.signatureURLFlag != "" {
                 require.NoError(cmd.Flags().Set("signature-url", tc.signatureURLFlag))
             }
-            if tc.configFlag != "" {
-                require.NoError(cmd.Flags().Set("config", tc.configFlag))
-            }
             cfm := &configFetchMeasurementsCmd{log: logger.NewTest(t)}
             flags, err := cfm.parseFetchMeasurementsFlags(cmd)
             if tc.wantErr {
@@ -283,8 +271,8 @@ func TestConfigFetchMeasurements(t *testing.T) {
             require := require.New(t)
 
             cmd := newConfigFetchMeasurementsCmd()
-            cmd.Flags().String("config", constants.ConfigFilename, "") // register persistent flag manually
+            cmd.Flags().String("workspace", "", "") // register persistent flag manually
             cmd.Flags().Bool("force", true, "") // register persistent flag manually
             require.NoError(cmd.Flags().Set("insecure", strconv.FormatBool(tc.insecureFlag)))
             fileHandler := file.NewHandler(afero.NewMemMapFs())
 
@@ -17,7 +17,6 @@ import (
     "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/edgelesssys/constellation/v2/internal/file"
     "github.com/edgelesssys/constellation/v2/internal/versions"
-    "github.com/siderolabs/talos/pkg/machinery/config/encoder"
     "github.com/spf13/afero"
     "github.com/spf13/cobra"
     "golang.org/x/mod/semver"
@@ -35,7 +34,6 @@ func newConfigGenerateCmd() *cobra.Command {
         ValidArgsFunction: generateCompletion,
         RunE: runConfigGenerate,
     }
-    cmd.Flags().StringP("file", "f", constants.ConfigFilename, "path to output file, or '-' for stdout")
     cmd.Flags().StringP("kubernetes", "k", semver.MajorMinor(config.Default().KubernetesVersion), "Kubernetes version to use in format MAJOR.MINOR")
     cmd.Flags().StringP("attestation", "a", "", fmt.Sprintf("attestation variant to use %s. If not specified, the default for the cloud provider is used", printFormattedSlice(variant.GetAvailableAttestationVariants())))
 
@@ -43,7 +41,7 @@ func newConfigGenerateCmd() *cobra.Command {
 }
 
 type generateFlags struct {
-    file string
+    workspace string
     k8sVersion string
     attestationVariant variant.Variant
 }
@@ -77,23 +75,12 @@ func (cg *configGenerateCmd) configGenerate(cmd *cobra.Command, fileHandler file
         return fmt.Errorf("creating config: %w", err)
     }
     conf.KubernetesVersion = flags.k8sVersion
-    if flags.file == "-" {
-        content, err := encoder.NewEncoder(conf).Encode()
-        if err != nil {
-            return fmt.Errorf("encoding config content: %w", err)
-        }
-
-        cg.log.Debugf("Writing YAML data to stdout")
-        _, err = cmd.OutOrStdout().Write(content)
-        return err
-    }
-
     cg.log.Debugf("Writing YAML data to configuration file")
-    if err := fileHandler.WriteYAML(flags.file, conf, file.OptMkdirAll); err != nil {
+    if err := fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptMkdirAll); err != nil {
         return err
     }
 
-    cmd.Println("Config file written to", flags.file)
+    cmd.Println("Config file written to", configPath(flags.workspace))
     cmd.Println("Please fill in your CSP-specific configuration before proceeding.")
     cmd.Println("For more information refer to the documentation:")
     cmd.Println("\thttps://docs.edgeless.systems/constellation/getting-started/first-steps")
@@ -150,9 +137,9 @@ func supportedVersions() string {
 }
 
 func parseGenerateFlags(cmd *cobra.Command) (generateFlags, error) {
-    file, err := cmd.Flags().GetString("file")
+    workspace, err := cmd.Flags().GetString("workspace")
     if err != nil {
-        return generateFlags{}, fmt.Errorf("parsing file flag: %w", err)
+        return generateFlags{}, fmt.Errorf("parsing workspace flag: %w", err)
     }
     k8sVersion, err := cmd.Flags().GetString("kubernetes")
     if err != nil {
@@ -179,7 +166,7 @@ func parseGenerateFlags(cmd *cobra.Command) (generateFlags, error) {
         }
     }
     return generateFlags{
-        file: file,
+        workspace: workspace,
         k8sVersion: resolvedVersion,
         attestationVariant: attestationVariant,
     }, nil
@@ -7,7 +7,6 @@ SPDX-License-Identifier: AGPL-3.0-only
 package cmd
 
 import (
-    "bytes"
     "fmt"
     "testing"
 
@@ -23,7 +22,6 @@ import (
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
     "golang.org/x/mod/semver"
-    "gopkg.in/yaml.v3"
 )
 
 func TestConfigGenerateKubernetesVersion(t *testing.T) {
@@ -51,6 +49,7 @@ func TestConfigGenerateKubernetesVersion(t *testing.T) {
 
             fileHandler := file.NewHandler(afero.NewMemMapFs())
             cmd := newConfigGenerateCmd()
+            cmd.Flags().String("workspace", "", "") // register persistent flag manually
             err := cmd.Flags().Set("kubernetes", tc.version)
             require.NoError(err)
 
@@ -72,6 +71,7 @@ func TestConfigGenerateDefault(t *testing.T) {
 
     fileHandler := file.NewHandler(afero.NewMemMapFs())
     cmd := newConfigGenerateCmd()
+    cmd.Flags().String("workspace", "", "") // register persistent flag manually
 
     cg := &configGenerateCmd{log: logger.NewTest(t)}
     require.NoError(cg.configGenerate(cmd, fileHandler, cloudprovider.Unknown, ""))
@@ -97,6 +97,7 @@ func TestConfigGenerateDefaultProviderSpecific(t *testing.T) {
 
             fileHandler := file.NewHandler(afero.NewMemMapFs())
             cmd := newConfigGenerateCmd()
+            cmd.Flags().String("workspace", "", "") // register persistent flag manually
 
             wantConf := config.Default()
             wantConf.RemoveProviderAndAttestationExcept(provider)
@@ -122,6 +123,7 @@ func TestConfigGenerateWithStackIt(t *testing.T) {
 
             fileHandler := file.NewHandler(afero.NewMemMapFs())
             cmd := newConfigGenerateCmd()
+            cmd.Flags().String("workspace", "", "") // register persistent flag manually
 
             wantConf := config.Default().WithOpenStackProviderDefaults(openStackProvider)
             wantConf.RemoveProviderAndAttestationExcept(cloudprovider.OpenStack)
@@ -143,42 +145,12 @@ func TestConfigGenerateDefaultExists(t *testing.T) {
     fileHandler := file.NewHandler(afero.NewMemMapFs())
     require.NoError(fileHandler.Write(constants.ConfigFilename, []byte("foobar"), file.OptNone))
     cmd := newConfigGenerateCmd()
+    cmd.Flags().String("workspace", "", "") // register persistent flag manually
 
     cg := &configGenerateCmd{log: logger.NewTest(t)}
     require.Error(cg.configGenerate(cmd, fileHandler, cloudprovider.Unknown, ""))
 }
 
-func TestConfigGenerateFileFlagRemoved(t *testing.T) {
-    require := require.New(t)
-
-    fileHandler := file.NewHandler(afero.NewMemMapFs())
-    cmd := newConfigGenerateCmd()
-    cmd.ResetFlags()
-
-    cg := &configGenerateCmd{log: logger.NewTest(t)}
-    require.Error(cg.configGenerate(cmd, fileHandler, cloudprovider.Unknown, ""))
-}
-
-func TestConfigGenerateStdOut(t *testing.T) {
-    assert := assert.New(t)
-    require := require.New(t)
-
-    fileHandler := file.NewHandler(afero.NewMemMapFs())
-
-    var outBuffer bytes.Buffer
-    cmd := newConfigGenerateCmd()
-    cmd.SetOut(&outBuffer)
-    require.NoError(cmd.Flags().Set("file", "-"))
-
-    cg := &configGenerateCmd{log: logger.NewTest(t)}
-    require.NoError(cg.configGenerate(cmd, fileHandler, cloudprovider.Unknown, ""))
-
-    var readConfig config.Config
-    require.NoError(yaml.NewDecoder(&outBuffer).Decode(&readConfig))
-
-    assert.Equal(*config.Default(), readConfig)
-}
-
 func TestNoValidProviderAttestationCombination(t *testing.T) {
     assert := assert.New(t)
     tests := []struct {
@@ -294,6 +266,7 @@ func TestAttestationArgument(t *testing.T) {
             assert := assert.New(t)
 
             cmd := newConfigGenerateCmd()
+            cmd.Flags().String("workspace", "", "") // register persistent flag manually
             require.NoError(test.setFlag(cmd))
 
             fileHandler := file.NewHandler(afero.NewMemMapFs())
@@ -10,6 +10,7 @@ import (
     "fmt"
 
     "github.com/edgelesssys/constellation/v2/internal/config"
+    "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/edgelesssys/constellation/v2/internal/file"
     "github.com/spf13/afero"
     "github.com/spf13/cobra"
@@ -28,19 +29,15 @@ func newConfigMigrateCmd() *cobra.Command {
 
 func runConfigMigrate(cmd *cobra.Command, _ []string) error {
     handler := file.NewHandler(afero.NewOsFs())
-    configPath, err := cmd.Flags().GetString("config")
-    if err != nil {
-        return fmt.Errorf("parsing config path flag: %w", err)
-    }
-    return configMigrate(cmd, configPath, handler)
+    return configMigrate(cmd, handler)
 }
 
-func configMigrate(cmd *cobra.Command, configPath string, handler file.Handler) error {
+func configMigrate(cmd *cobra.Command, handler file.Handler) error {
     // Make sure we are reading a v2 config
     var cfgVersion struct {
         Version string `yaml:"version"`
     }
-    if err := handler.ReadYAML(configPath, &cfgVersion); err != nil {
+    if err := handler.ReadYAML(constants.ConfigFilename, &cfgVersion); err != nil {
         return err
     }
 
@@ -71,12 +71,12 @@ func (c *createCmd) create(cmd *cobra.Command, creator cloudCreator, fileHandler
         return err
     }
     c.log.Debugf("Using flags: %+v", flags)
-    if err := c.checkDirClean(fileHandler); err != nil {
+    if err := c.checkDirClean(flags.workspace, fileHandler); err != nil {
         return err
     }
 
-    c.log.Debugf("Loading configuration file from %q", flags.configPath)
-    conf, err := config.New(fileHandler, flags.configPath, fetcher, flags.force)
+    c.log.Debugf("Loading configuration file from %q", configPath(flags.workspace))
+    conf, err := config.New(fileHandler, constants.ConfigFilename, fetcher, flags.force)
     c.log.Debugf("Configuration file loaded: %+v", conf)
     var configValidationErr *config.ValidationError
     if errors.As(err, &configValidationErr) {
@@ -160,18 +160,19 @@ func (c *createCmd) create(cmd *cobra.Command, creator cloudCreator, fileHandler
 
     spinner.Start("Creating", false)
     opts := cloudcmd.CreateOptions{
         Provider: provider,
         Config: conf,
         TFLogLevel: flags.tfLogLevel,
+        TFWorkspace: constants.TerraformWorkingDir,
     }
     idFile, err := creator.Create(cmd.Context(), opts)
     spinner.Stop()
     if err != nil {
-        return translateCreateErrors(cmd, err)
+        return translateCreateErrors(cmd, flags.workspace, err)
     }
     c.log.Debugf("Successfully created the cloud resources for the cluster")
 
-    if err := fileHandler.WriteJSON(constants.ClusterIDsFileName, idFile, file.OptNone); err != nil {
+    if err := fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptNone); err != nil {
         return err
     }
 
@@ -187,11 +188,11 @@ func (c *createCmd) parseCreateFlags(cmd *cobra.Command) (createFlags, error) {
     }
     c.log.Debugf("Yes flag is %t", yes)
 
-    configPath, err := cmd.Flags().GetString("config")
+    workspace, err := cmd.Flags().GetString("workspace")
     if err != nil {
         return createFlags{}, fmt.Errorf("parsing config path argument: %w", err)
     }
-    c.log.Debugf("Configuration path flag is %q", configPath)
+    c.log.Debugf("Workspace set to %q", workspace)
 
     force, err := cmd.Flags().GetBool("force")
     if err != nil {
@@ -207,10 +208,10 @@ func (c *createCmd) parseCreateFlags(cmd *cobra.Command) (createFlags, error) {
     if err != nil {
         return createFlags{}, fmt.Errorf("parsing Terraform log level %s: %w", logLevelString, err)
     }
-    c.log.Debugf("Terraform logs will be written into %s at level %s", constants.TerraformLogFile, logLevel.String())
+    c.log.Debugf("Terraform logs will be written into %s at level %s", terraformLogPath(workspace), logLevel.String())
 
     return createFlags{
-        configPath: configPath,
+        workspace: workspace,
         tfLogLevel: logLevel,
         force: force,
         yes: yes,
@@ -219,44 +220,44 @@ func (c *createCmd) parseCreateFlags(cmd *cobra.Command) (createFlags, error) {
 
 // createFlags contains the parsed flags of the create command.
 type createFlags struct {
-    configPath string
+    workspace string
     tfLogLevel terraform.LogLevel
     force bool
     yes bool
 }
 
 // checkDirClean checks if files of a previous Constellation are left in the current working dir.
-func (c *createCmd) checkDirClean(fileHandler file.Handler) error {
+func (c *createCmd) checkDirClean(workspace string, fileHandler file.Handler) error {
     c.log.Debugf("Checking admin configuration file")
     if _, err := fileHandler.Stat(constants.AdminConfFilename); !errors.Is(err, fs.ErrNotExist) {
-        return fmt.Errorf("file '%s' already exists in working directory, run 'constellation terminate' before creating a new one", constants.AdminConfFilename)
+        return fmt.Errorf("file '%s' already exists in working directory, run 'constellation terminate' before creating a new one", adminConfPath(workspace))
     }
     c.log.Debugf("Checking master secrets file")
     if _, err := fileHandler.Stat(constants.MasterSecretFilename); !errors.Is(err, fs.ErrNotExist) {
-        return fmt.Errorf("file '%s' already exists in working directory. Constellation won't overwrite previous master secrets. Move it somewhere or delete it before creating a new cluster", constants.MasterSecretFilename)
+        return fmt.Errorf("file '%s' already exists in working directory. Constellation won't overwrite previous master secrets. Move it somewhere or delete it before creating a new cluster", masterSecretPath(workspace))
    }
     c.log.Debugf("Checking cluster IDs file")
-    if _, err := fileHandler.Stat(constants.ClusterIDsFileName); !errors.Is(err, fs.ErrNotExist) {
-        return fmt.Errorf("file '%s' already exists in working directory. Constellation won't overwrite previous cluster IDs. Move it somewhere or delete it before creating a new cluster", constants.ClusterIDsFileName)
+    if _, err := fileHandler.Stat(constants.ClusterIDsFilename); !errors.Is(err, fs.ErrNotExist) {
+        return fmt.Errorf("file '%s' already exists in working directory. Constellation won't overwrite previous cluster IDs. Move it somewhere or delete it before creating a new cluster", clusterIDsPath(workspace))
     }
 
     return nil
 }
 
-func translateCreateErrors(cmd *cobra.Command, err error) error {
+func translateCreateErrors(cmd *cobra.Command, workspace string, err error) error {
     switch {
     case errors.Is(err, terraform.ErrTerraformWorkspaceDifferentFiles):
         cmd.PrintErrln("\nYour current working directory contains an existing Terraform workspace which does not match the expected state.")
         cmd.PrintErrln("This can be due to a mix up between providers, versions or an otherwise corrupted workspace.")
         cmd.PrintErrln("Before creating a new cluster, try \"constellation terminate\".")
-        cmd.PrintErrf("If this does not work, either move or delete the directory %q.\n", constants.TerraformWorkingDir)
+        cmd.PrintErrf("If this does not work, either move or delete the directory %q.\n", terraformClusterWorkspace(workspace))
        cmd.PrintErrln("Please only delete the directory if you made sure that all created cloud resources have been terminated.")
         return err
     case errors.Is(err, terraform.ErrTerraformWorkspaceExistsWithDifferentVariables):
         cmd.PrintErrln("\nYour current working directory contains an existing Terraform workspace which was initiated with different input variables.")
cmd.PrintErrln("This can be the case if you have tried to create a cluster before with different options which did not complete, or the workspace is corrupted.")
|
cmd.PrintErrln("This can be the case if you have tried to create a cluster before with different options which did not complete, or the workspace is corrupted.")
|
||||||
cmd.PrintErrln("Before creating a new cluster, try \"constellation terminate\".")
|
cmd.PrintErrln("Before creating a new cluster, try \"constellation terminate\".")
|
||||||
cmd.PrintErrf("If this does not work, either move or delete the directory %q.\n", constants.TerraformWorkingDir)
|
cmd.PrintErrf("If this does not work, either move or delete the directory %q.\n", terraformClusterWorkspace(workspace))
|
||||||
cmd.PrintErrln("Please only delete the directory if you made sure that all created cloud resources have been terminated.")
|
cmd.PrintErrln("Please only delete the directory if you made sure that all created cloud resources have been terminated.")
|
||||||
return err
|
return err
|
||||||
default:
|
default:
|
||||||
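
The create command now builds file paths through small helpers (adminConfPath, masterSecretPath, clusterIDsPath, terraformLogPath, terraformClusterWorkspace) instead of using the filename constants directly. Their definitions are not part of this excerpt; a minimal sketch, assuming they simply join the workspace directory with the existing constants (the bodies below are illustrative only, not the commit's actual implementation):

// Hedged sketch only: helper names are taken from the diff above, the bodies are assumptions.
package cmd

import (
	"path/filepath"

	"github.com/edgelesssys/constellation/v2/internal/constants"
)

// adminConfPath returns the admin kubeconfig path inside the workspace.
func adminConfPath(workspace string) string {
	return filepath.Join(workspace, constants.AdminConfFilename)
}

// masterSecretPath returns the master secret path inside the workspace.
func masterSecretPath(workspace string) string {
	return filepath.Join(workspace, constants.MasterSecretFilename)
}

// clusterIDsPath returns the cluster IDs file path inside the workspace.
func clusterIDsPath(workspace string) string {
	return filepath.Join(workspace, constants.ClusterIDsFilename)
}

// terraformLogPath returns the Terraform log file path inside the workspace.
func terraformLogPath(workspace string) string {
	return filepath.Join(workspace, constants.TerraformLogFile)
}

The remaining helpers (terraformClusterWorkspace, terraformIAMWorkspace, configPath) presumably follow the same pattern with their respective directory or filename constants.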

@@ -41,7 +41,6 @@ func TestCreate(t *testing.T) {
 yesFlag bool
 controllerCountFlag *int
 workerCountFlag *int
-configFlag string
 stdin string
 wantErr bool
 wantAbort bool
@@ -141,13 +140,12 @@ func TestCreate(t *testing.T) {
 wantErr: true,
 },
 "config does not exist": {
-setupFs: fsWithDefaultConfig,
+setupFs: func(a *require.Assertions, p cloudprovider.Provider) afero.Fs { return afero.NewMemMapFs() },
 creator: &stubCloudCreator{},
 provider: cloudprovider.GCP,
 controllerCountFlag: intPtr(1),
 workerCountFlag: intPtr(1),
 yesFlag: true,
-configFlag: "/does/not/exist",
 wantErr: true,
 },
 "create error": {
@@ -184,16 +182,13 @@ func TestCreate(t *testing.T) {
 cmd.SetOut(&bytes.Buffer{})
 cmd.SetErr(&bytes.Buffer{})
 cmd.SetIn(bytes.NewBufferString(tc.stdin))
-cmd.Flags().String("config", constants.ConfigFilename, "") // register persistent flag manually
+cmd.Flags().String("workspace", "", "") // register persistent flag manually
 cmd.Flags().Bool("force", true, "") // register persistent flag manually
 cmd.Flags().String("tf-log", "NONE", "") // register persistent flag manually

 if tc.yesFlag {
 require.NoError(cmd.Flags().Set("yes", "true"))
 }
-if tc.configFlag != "" {
-require.NoError(cmd.Flags().Set("config", tc.configFlag))
-}
 if tc.controllerCountFlag != nil {
 require.NoError(cmd.Flags().Set("control-plane-nodes", strconv.Itoa(*tc.controllerCountFlag)))
 }
@@ -214,7 +209,7 @@ func TestCreate(t *testing.T) {
 } else {
 assert.True(tc.creator.createCalled)
 var gotIDFile clusterid.File
-require.NoError(fileHandler.ReadJSON(constants.ClusterIDsFileName, &gotIDFile))
+require.NoError(fileHandler.ReadJSON(constants.ClusterIDsFilename, &gotIDFile))
 assert.Equal(gotIDFile, clusterid.File{
 IP: idFile.IP,
 CloudProvider: tc.provider,
@@ -260,7 +255,7 @@ func TestCheckDirClean(t *testing.T) {
 require.NoError(tc.fileHandler.Write(f, []byte{1, 2, 3}, file.OptNone))
 }
 c := &createCmd{log: logger.NewTest(t)}
-err := c.checkDirClean(tc.fileHandler)
+err := c.checkDirClean("", tc.fileHandler)

 if tc.wantErr {
 assert.Error(err)

@@ -130,14 +130,16 @@ func newIAMCreateGCPCmd() *cobra.Command {
 // createRunIAMFunc is the entrypoint for the iam create command. It sets up the iamCreator
 // and starts IAM creation for the specific cloud provider.
 func createRunIAMFunc(provider cloudprovider.Provider) func(cmd *cobra.Command, args []string) error {
-var providerCreator providerIAMCreator
+var providerCreator func(workspace string) providerIAMCreator
 switch provider {
 case cloudprovider.AWS:
-providerCreator = &awsIAMCreator{}
+providerCreator = func(string) providerIAMCreator { return &awsIAMCreator{} }
 case cloudprovider.Azure:
-providerCreator = &azureIAMCreator{}
+providerCreator = func(string) providerIAMCreator { return &azureIAMCreator{} }
 case cloudprovider.GCP:
-providerCreator = &gcpIAMCreator{}
+providerCreator = func(workspace string) providerIAMCreator {
+return &gcpIAMCreator{workspace}
+}
 default:
 return func(cmd *cobra.Command, args []string) error {
 return fmt.Errorf("unknown provider %s", provider)
@@ -153,21 +155,25 @@ func createRunIAMFunc(provider cloudprovider.Provider) func(cmd *cobra.Command,
 if err != nil {
 return fmt.Errorf("parsing Terraform log level %s: %w", logLevelString, err)
 }
+workspace, err := cmd.Flags().GetString("workspace")
+if err != nil {
+return fmt.Errorf("parsing workspace string: %w", err)
+}

-iamCreator, err := newIAMCreator(cmd, logLevel)
+iamCreator, err := newIAMCreator(cmd, workspace, logLevel)
 if err != nil {
 return fmt.Errorf("creating iamCreator: %w", err)
 }
 defer iamCreator.spinner.Stop()
 defer iamCreator.log.Sync()
 iamCreator.provider = provider
-iamCreator.providerCreator = providerCreator
+iamCreator.providerCreator = providerCreator(workspace)
 return iamCreator.create(cmd.Context())
 }
 }

 // newIAMCreator creates a new iamiamCreator.
-func newIAMCreator(cmd *cobra.Command, logLevel terraform.LogLevel) (*iamCreator, error) {
+func newIAMCreator(cmd *cobra.Command, workspace string, logLevel terraform.LogLevel) (*iamCreator, error) {
 spinner, err := newSpinnerOrStderr(cmd)
 if err != nil {
 return nil, fmt.Errorf("creating spinner: %w", err)
@@ -176,7 +182,7 @@ func newIAMCreator(cmd *cobra.Command, logLevel terraform.LogLevel) (*iamCreator
 if err != nil {
 return nil, fmt.Errorf("creating logger: %w", err)
 }
-log.Debugf("Terraform logs will be written into %s at level %s", constants.TerraformLogFile, logLevel.String())
+log.Debugf("Terraform logs will be written into %s at level %s", terraformLogPath(workspace), logLevel.String())

 return &iamCreator{
 cmd: cmd,
@@ -185,7 +191,8 @@ func newIAMCreator(cmd *cobra.Command, logLevel terraform.LogLevel) (*iamCreator
 creator: cloudcmd.NewIAMCreator(spinner),
 fileHandler: file.NewHandler(afero.NewOsFs()),
 iamConfig: &cloudcmd.IAMConfigOptions{
-TFLogLevel: logLevel,
+TFWorkspace: constants.TerraformIAMWorkingDir,
+TFLogLevel: logLevel,
 },
 }, nil
 }
@@ -210,7 +217,7 @@ func (c *iamCreator) create(ctx context.Context) error {
 }
 c.log.Debugf("Using flags: %+v", flags)

-if err := c.checkWorkingDir(); err != nil {
+if err := c.checkWorkingDir(flags.workspace); err != nil {
 return err
 }

@@ -229,14 +236,14 @@ func (c *iamCreator) create(ctx context.Context) error {

 var conf config.Config
 if flags.updateConfig {
-c.log.Debugf("Parsing config %s", flags.configPath)
+c.log.Debugf("Parsing config %s", configPath(flags.workspace))
-if err = c.fileHandler.ReadYAML(flags.configPath, &conf); err != nil {
+if err = c.fileHandler.ReadYAML(constants.ConfigFilename, &conf); err != nil {
 return fmt.Errorf("error reading the configuration file: %w", err)
 }
 if err := validateConfigWithFlagCompatibility(c.provider, conf, flags); err != nil {
 return err
 }
-c.cmd.Printf("The configuration file %q will be automatically updated with the IAM values and zone/region information.\n", flags.configPath)
+c.cmd.Printf("The configuration file %q will be automatically updated with the IAM values and zone/region information.\n", configPath(flags.workspace))
 }

 c.spinner.Start("Creating", false)
@@ -254,12 +261,12 @@ func (c *iamCreator) create(ctx context.Context) error {
 }

 if flags.updateConfig {
-c.log.Debugf("Writing IAM configuration to %s", flags.configPath)
+c.log.Debugf("Writing IAM configuration to %s", configPath(flags.workspace))
 c.providerCreator.writeOutputValuesToConfig(&conf, flags, iamFile)
-if err := c.fileHandler.WriteYAML(flags.configPath, conf, file.OptOverwrite); err != nil {
+if err := c.fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil {
 return err
 }
-c.cmd.Printf("Your IAM configuration was created and filled into %s successfully.\n", flags.configPath)
+c.cmd.Printf("Your IAM configuration was created and filled into %s successfully.\n", configPath(flags.workspace))
 return nil
 }

@@ -271,7 +278,7 @@ func (c *iamCreator) create(ctx context.Context) error {

 // parseFlagsAndSetupConfig parses the flags of the iam create command and fills the values into the IAM config (output values of the command).
 func (c *iamCreator) parseFlagsAndSetupConfig() (iamFlags, error) {
-configPath, err := c.cmd.Flags().GetString("config")
+cwd, err := c.cmd.Flags().GetString("workspace")
 if err != nil {
 return iamFlags{}, fmt.Errorf("parsing config string: %w", err)
 }
@@ -285,7 +292,7 @@ func (c *iamCreator) parseFlagsAndSetupConfig() (iamFlags, error) {
 }

 flags := iamFlags{
-configPath: configPath,
+workspace: cwd,
 yesFlag: yesFlag,
 updateConfig: updateConfig,
 }
@@ -299,9 +306,9 @@ func (c *iamCreator) parseFlagsAndSetupConfig() (iamFlags, error) {
 }

 // checkWorkingDir checks if the current working directory already contains a Terraform dir.
-func (c *iamCreator) checkWorkingDir() error {
+func (c *iamCreator) checkWorkingDir(workspace string) error {
 if _, err := c.fileHandler.Stat(constants.TerraformIAMWorkingDir); err == nil {
-return fmt.Errorf("the current working directory already contains the Terraform workspace directory %q. Please run the command in a different directory or destroy the existing workspace", constants.TerraformIAMWorkingDir)
+return fmt.Errorf("the current working directory already contains the Terraform workspace directory %q. Please run the command in a different directory or destroy the existing workspace", terraformIAMWorkspace(workspace))
 }
 return nil
 }
@@ -311,7 +318,7 @@ type iamFlags struct {
 aws awsFlags
 azure azureFlags
 gcp gcpFlags
-configPath string
+workspace string
 yesFlag bool
 updateConfig bool
 }
@@ -481,7 +488,9 @@ func (c *azureIAMCreator) parseAndWriteIDFile(_ iamid.File, _ file.Handler) erro
 }

 // gcpIAMCreator implements the providerIAMCreator interface for GCP.
-type gcpIAMCreator struct{}
+type gcpIAMCreator struct {
+workspace string
+}

 func (c *gcpIAMCreator) parseFlagsAndSetupConfig(cmd *cobra.Command, flags iamFlags, iamConfig *cloudcmd.IAMConfigOptions) (iamFlags, error) {
 zone, err := cmd.Flags().GetString("zone")
@@ -540,16 +549,16 @@ func (c *gcpIAMCreator) printConfirmValues(cmd *cobra.Command, flags iamFlags) {
 cmd.Printf("Zone:\t\t\t%s\n\n", flags.gcp.zone)
 }

-func (c *gcpIAMCreator) printOutputValues(cmd *cobra.Command, _ iamFlags, _ iamid.File) {
+func (c *gcpIAMCreator) printOutputValues(cmd *cobra.Command, flags iamFlags, _ iamid.File) {
-cmd.Printf("projectID:\t\t%s\n", constants.GCPServiceAccountKeyFile)
+cmd.Printf("projectID:\t\t%s\n", flags.gcp.projectID)
-cmd.Printf("region:\t\t\t%s\n", constants.GCPServiceAccountKeyFile)
+cmd.Printf("region:\t\t\t%s\n", flags.gcp.region)
-cmd.Printf("zone:\t\t\t%s\n", constants.GCPServiceAccountKeyFile)
+cmd.Printf("zone:\t\t\t%s\n", flags.gcp.zone)
-cmd.Printf("serviceAccountKeyPath:\t%s\n\n", constants.GCPServiceAccountKeyFile)
+cmd.Printf("serviceAccountKeyPath:\t%s\n\n", gcpServiceAccountKeyPath(c.workspace))
 }

 func (c *gcpIAMCreator) writeOutputValuesToConfig(conf *config.Config, flags iamFlags, _ iamid.File) {
 conf.Provider.GCP.Project = flags.gcp.projectID
-conf.Provider.GCP.ServiceAccountKeyPath = constants.GCPServiceAccountKeyFile
+conf.Provider.GCP.ServiceAccountKeyPath = gcpServiceAccountKeyFile // File was created in workspace, so only the filename is needed.
 conf.Provider.GCP.Region = flags.gcp.region
 conf.Provider.GCP.Zone = flags.gcp.zone
 for groupName, group := range conf.NodeGroups {
@@ -565,7 +574,7 @@ func (c *gcpIAMCreator) parseAndWriteIDFile(iamFile iamid.File, fileHandler file
 return err
 }

-return fileHandler.WriteJSON(constants.GCPServiceAccountKeyFile, tmpOut, file.OptNone)
+return fileHandler.WriteJSON(gcpServiceAccountKeyFile, tmpOut, file.OptNone)
 }

 // parseIDFile parses the given base64 encoded JSON string of the GCP service account key and returns a map.
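
For GCP, the hunks above distinguish between gcpServiceAccountKeyFile (the bare filename written into the config, since paths in the config are interpreted relative to the workspace) and gcpServiceAccountKeyPath(workspace) (the full path used for file I/O and user-facing messages). Neither definition appears in this excerpt; a hedged sketch under that assumption, with the concrete filename value being a guess:

// Hedged sketch only: both identifiers are referenced by the diff but defined elsewhere.
package cmd

import "path/filepath"

// Bare filename stored in the config; resolved relative to the workspace at runtime.
const gcpServiceAccountKeyFile = "gcpServiceAccountKey.json" // assumed value

// Full path used when reading, writing, or printing the key file.
func gcpServiceAccountKeyPath(workspace string) string {
	return filepath.Join(workspace, gcpServiceAccountKeyFile)
}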

@@ -88,7 +88,6 @@ func TestIAMCreateAWS(t *testing.T) {
 prefixFlag string
 yesFlag bool
 updateConfigFlag bool
-configFlag string
 existingConfigFiles []string
 existingDirs []string
 stdin string
@@ -121,7 +120,6 @@ func TestIAMCreateAWS(t *testing.T) {
 zoneFlag: "us-east-2a",
 prefixFlag: "test",
 yesFlag: true,
-configFlag: constants.ConfigFilename,
 updateConfigFlag: true,
 existingConfigFiles: []string{constants.ConfigFilename},
 },
@@ -160,28 +158,6 @@ func TestIAMCreateAWS(t *testing.T) {
 prefixFlag: "test",
 yesFlag: true,
 },
-"iam create aws --update-config with --config": {
-setupFs: defaultFs,
-creator: &stubIAMCreator{id: validIAMIDFile},
-provider: cloudprovider.AWS,
-zoneFlag: "us-east-2a",
-prefixFlag: "test",
-yesFlag: true,
-updateConfigFlag: true,
-configFlag: "custom-config.yaml",
-existingConfigFiles: []string{"custom-config.yaml"},
-},
-"iam create aws --update-config --config path doesn't exist": {
-setupFs: defaultFs,
-creator: &stubIAMCreator{id: validIAMIDFile},
-provider: cloudprovider.AWS,
-zoneFlag: "us-east-2a",
-prefixFlag: "test",
-yesFlag: true,
-updateConfigFlag: true,
-wantErr: true,
-configFlag: constants.ConfigFilename,
-},
 "iam create aws existing terraform dir": {
 setupFs: defaultFs,
 creator: &stubIAMCreator{id: validIAMIDFile},
@@ -207,7 +183,6 @@ func TestIAMCreateAWS(t *testing.T) {
 zoneFlag: "us-east-2a",
 prefixFlag: "test",
 stdin: "yes\n",
-configFlag: constants.ConfigFilename,
 updateConfigFlag: true,
 existingConfigFiles: []string{constants.ConfigFilename},
 },
@@ -228,7 +203,6 @@ func TestIAMCreateAWS(t *testing.T) {
 prefixFlag: "test",
 stdin: "no\n",
 updateConfigFlag: true,
-configFlag: constants.ConfigFilename,
 wantAbort: true,
 existingConfigFiles: []string{constants.ConfigFilename},
 },
@@ -250,7 +224,6 @@ func TestIAMCreateAWS(t *testing.T) {
 yesFlag: true,
 updateConfigFlag: true,
 wantErr: true,
-configFlag: constants.ConfigFilename,
 },
 }

@@ -265,7 +238,7 @@ func TestIAMCreateAWS(t *testing.T) {
 cmd.SetIn(bytes.NewBufferString(tc.stdin))

 // register persistent flags manually
-cmd.Flags().String("config", constants.ConfigFilename, "")
+cmd.Flags().String("workspace", "", "")
 cmd.Flags().Bool("update-config", false, "")
 cmd.Flags().Bool("yes", false, "")
 cmd.Flags().String("name", "constell", "")
@@ -283,9 +256,6 @@ func TestIAMCreateAWS(t *testing.T) {
 if tc.updateConfigFlag {
 require.NoError(cmd.Flags().Set("update-config", "true"))
 }
-if tc.configFlag != "" {
-require.NoError(cmd.Flags().Set("config", tc.configFlag))
-}

 fileHandler := file.NewHandler(tc.setupFs(require, tc.provider, tc.existingConfigFiles, tc.existingDirs))

@@ -314,7 +284,7 @@ func TestIAMCreateAWS(t *testing.T) {

 if tc.updateConfigFlag {
 readConfig := &config.Config{}
-readErr := fileHandler.ReadYAML(tc.configFlag, readConfig)
+readErr := fileHandler.ReadYAML(constants.ConfigFilename, readConfig)
 require.NoError(readErr)
 assert.Equal(tc.creator.id.AWSOutput.ControlPlaneInstanceProfile, readConfig.Provider.AWS.IAMProfileControlPlane)
 assert.Equal(tc.creator.id.AWSOutput.WorkerNodeInstanceProfile, readConfig.Provider.AWS.IAMProfileWorkerNodes)
@@ -329,17 +299,7 @@ func TestIAMCreateAWS(t *testing.T) {
 }

 func TestIAMCreateAzure(t *testing.T) {
-defaultFs := func(require *require.Assertions, provider cloudprovider.Provider, existingConfigFiles []string, existingDirs []string) afero.Fs {
+defaultFs := createFSWithConfig(*createConfig(cloudprovider.Azure))
-fs := afero.NewMemMapFs()
-fileHandler := file.NewHandler(fs)
-for _, f := range existingConfigFiles {
-require.NoError(fileHandler.WriteYAML(f, createConfig(cloudprovider.Azure), file.OptNone))
-}
-for _, d := range existingDirs {
-require.NoError(fs.MkdirAll(d, 0o755))
-}
-return fs
-}
 readOnlyFs := func(require *require.Assertions, provider cloudprovider.Provider, existingConfigFiles []string, existingDirs []string) afero.Fs {
 fs := afero.NewReadOnlyFs(afero.NewMemMapFs())
 return fs
@@ -362,7 +322,6 @@ func TestIAMCreateAzure(t *testing.T) {
 resourceGroupFlag string
 yesFlag bool
 updateConfigFlag bool
-configFlag string
 existingConfigFiles []string
 existingDirs []string
 stdin string
@@ -396,46 +355,9 @@ func TestIAMCreateAzure(t *testing.T) {
 servicePrincipalFlag: "constell-test-sp",
 resourceGroupFlag: "constell-test-rg",
 updateConfigFlag: true,
-configFlag: constants.ConfigFilename,
 yesFlag: true,
 existingConfigFiles: []string{constants.ConfigFilename},
 },
-"iam create azure --update-config with --config": {
-setupFs: defaultFs,
-creator: &stubIAMCreator{id: validIAMIDFile},
-provider: cloudprovider.Azure,
-regionFlag: "westus",
-servicePrincipalFlag: "constell-test-sp",
-resourceGroupFlag: "constell-test-rg",
-updateConfigFlag: true,
-configFlag: "custom-config.yaml",
-yesFlag: true,
-existingConfigFiles: []string{"custom-config.yaml"},
-},
-"iam create azure --update-config custom --config path doesn't exist": {
-setupFs: defaultFs,
-creator: &stubIAMCreator{id: validIAMIDFile},
-provider: cloudprovider.Azure,
-regionFlag: "westus",
-servicePrincipalFlag: "constell-test-sp",
-resourceGroupFlag: "constell-test-rg",
-updateConfigFlag: true,
-yesFlag: true,
-wantErr: true,
-configFlag: "custom-config.yaml",
-},
-"iam create azur --update-config --config path doesn't exists": {
-setupFs: defaultFs,
-creator: &stubIAMCreator{id: validIAMIDFile},
-provider: cloudprovider.Azure,
-regionFlag: "westus",
-servicePrincipalFlag: "constell-test-sp",
-resourceGroupFlag: "constell-test-rg",
-updateConfigFlag: true,
-configFlag: constants.ConfigFilename,
-yesFlag: true,
-wantErr: true,
-},
 "iam create azure existing terraform dir": {
 setupFs: defaultFs,
 creator: &stubIAMCreator{id: validIAMIDFile},
@@ -465,7 +387,6 @@ func TestIAMCreateAzure(t *testing.T) {
 resourceGroupFlag: "constell-test-rg",
 stdin: "yes\n",
 updateConfigFlag: true,
-configFlag: constants.ConfigFilename,
 existingConfigFiles: []string{constants.ConfigFilename},
 },
 "interactive abort": {
@@ -499,7 +420,6 @@ func TestIAMCreateAzure(t *testing.T) {
 resourceGroupFlag: "constell-test-rg",
 yesFlag: true,
 updateConfigFlag: true,
-configFlag: constants.ConfigFilename,
 wantErr: true,
 },
 }
@@ -515,7 +435,7 @@ func TestIAMCreateAzure(t *testing.T) {
 cmd.SetIn(bytes.NewBufferString(tc.stdin))

 // register persistent flags manually
-cmd.Flags().String("config", constants.ConfigFilename, "")
+cmd.Flags().String("workspace", "", "")
 cmd.Flags().Bool("update-config", false, "")
 cmd.Flags().Bool("yes", false, "")
 cmd.Flags().String("name", "constell", "")
@@ -536,9 +456,6 @@ func TestIAMCreateAzure(t *testing.T) {
 if tc.updateConfigFlag {
 require.NoError(cmd.Flags().Set("update-config", "true"))
 }
-if tc.configFlag != "" {
-require.NoError(cmd.Flags().Set("config", tc.configFlag))
-}

 fileHandler := file.NewHandler(tc.setupFs(require, tc.provider, tc.existingConfigFiles, tc.existingDirs))

@@ -566,7 +483,7 @@ func TestIAMCreateAzure(t *testing.T) {

 if tc.updateConfigFlag {
 readConfig := &config.Config{}
-readErr := fileHandler.ReadYAML(tc.configFlag, readConfig)
+readErr := fileHandler.ReadYAML(constants.ConfigFilename, readConfig)
 require.NoError(readErr)
 assert.Equal(tc.creator.id.AzureOutput.SubscriptionID, readConfig.Provider.Azure.SubscriptionID)
 assert.Equal(tc.creator.id.AzureOutput.TenantID, readConfig.Provider.Azure.TenantID)
@@ -582,17 +499,7 @@ func TestIAMCreateAzure(t *testing.T) {
 }

 func TestIAMCreateGCP(t *testing.T) {
-defaultFs := func(require *require.Assertions, provider cloudprovider.Provider, existingConfigFiles []string, existingDirs []string) afero.Fs {
+defaultFs := createFSWithConfig(*createConfig(cloudprovider.GCP))
-fs := afero.NewMemMapFs()
-fileHandler := file.NewHandler(fs)
-for _, f := range existingConfigFiles {
-require.NoError(fileHandler.WriteYAML(f, createConfig(cloudprovider.GCP), file.OptNone))
-}
-for _, d := range existingDirs {
-require.NoError(fs.MkdirAll(d, 0o755))
-}
-return fs
-}
 readOnlyFs := func(require *require.Assertions, provider cloudprovider.Provider, existingConfigFiles []string, existingDirs []string) afero.Fs {
 fs := afero.NewReadOnlyFs(afero.NewMemMapFs())
 return fs
@@ -619,7 +526,6 @@ func TestIAMCreateGCP(t *testing.T) {
 projectIDFlag string
 yesFlag bool
 updateConfigFlag bool
-configFlag string
 existingConfigFiles []string
 existingDirs []string
 stdin string
@@ -653,46 +559,9 @@ func TestIAMCreateGCP(t *testing.T) {
 serviceAccountIDFlag: "constell-test",
 projectIDFlag: "constell-1234",
 updateConfigFlag: true,
-configFlag: constants.ConfigFilename,
 yesFlag: true,
 existingConfigFiles: []string{constants.ConfigFilename},
 },
-"iam create gcp --update-config with --config": {
-setupFs: defaultFs,
-creator: &stubIAMCreator{id: validIAMIDFile},
-provider: cloudprovider.GCP,
-zoneFlag: "europe-west1-a",
-serviceAccountIDFlag: "constell-test",
-projectIDFlag: "constell-1234",
-updateConfigFlag: true,
-configFlag: "custom-config.yaml",
-yesFlag: true,
-existingConfigFiles: []string{"custom-config.yaml"},
-},
-"iam create gcp --update-config --config path doesn't exists": {
-setupFs: defaultFs,
-creator: &stubIAMCreator{id: validIAMIDFile},
-provider: cloudprovider.GCP,
-zoneFlag: "europe-west1-a",
-serviceAccountIDFlag: "constell-test",
-projectIDFlag: "constell-1234",
-updateConfigFlag: true,
-configFlag: constants.ConfigFilename,
-yesFlag: true,
-wantErr: true,
-},
-"iam create gcp --update-config wrong --config path": {
-setupFs: defaultFs,
-creator: &stubIAMCreator{id: validIAMIDFile},
-provider: cloudprovider.GCP,
-zoneFlag: "europe-west1-a",
-serviceAccountIDFlag: "constell-test",
-projectIDFlag: "constell-1234",
-updateConfigFlag: true,
-configFlag: "custom-config.yaml",
-yesFlag: true,
-wantErr: true,
-},
 "iam create gcp existing terraform dir": {
 setupFs: defaultFs,
 creator: &stubIAMCreator{id: validIAMIDFile},
@@ -740,7 +609,6 @@ func TestIAMCreateGCP(t *testing.T) {
 serviceAccountIDFlag: "constell-test",
 projectIDFlag: "constell-1234",
 stdin: "yes\n",
-configFlag: constants.ConfigFilename,
 updateConfigFlag: true,
 existingConfigFiles: []string{constants.ConfigFilename},
 },
@@ -763,7 +631,6 @@ func TestIAMCreateGCP(t *testing.T) {
 projectIDFlag: "constell-1234",
 stdin: "no\n",
 wantAbort: true,
-configFlag: constants.ConfigFilename,
 updateConfigFlag: true,
 existingConfigFiles: []string{constants.ConfigFilename},
 },
@@ -776,7 +643,6 @@ func TestIAMCreateGCP(t *testing.T) {
 projectIDFlag: "constell-1234",
 yesFlag: true,
 updateConfigFlag: true,
-configFlag: constants.ConfigFilename,
 wantErr: true,
 },
 }
@@ -792,7 +658,7 @@ func TestIAMCreateGCP(t *testing.T) {
 cmd.SetIn(bytes.NewBufferString(tc.stdin))

 // register persistent flags manually
-cmd.Flags().String("config", constants.ConfigFilename, "")
+cmd.Flags().String("workspace", "", "")
 cmd.Flags().Bool("update-config", false, "")
 cmd.Flags().Bool("yes", false, "")
 cmd.Flags().String("name", "constell", "")
@@ -813,9 +679,6 @@ func TestIAMCreateGCP(t *testing.T) {
 if tc.updateConfigFlag {
 require.NoError(cmd.Flags().Set("update-config", "true"))
 }
-if tc.configFlag != "" {
-require.NoError(cmd.Flags().Set("config", tc.configFlag))
-}

 fileHandler := file.NewHandler(tc.setupFs(require, tc.provider, tc.existingConfigFiles, tc.existingDirs))

@@ -843,15 +706,15 @@ func TestIAMCreateGCP(t *testing.T) {

 if tc.updateConfigFlag {
 readConfig := &config.Config{}
-readErr := fileHandler.ReadYAML(tc.configFlag, readConfig)
+readErr := fileHandler.ReadYAML(constants.ConfigFilename, readConfig)
 require.NoError(readErr)
-assert.Equal(constants.GCPServiceAccountKeyFile, readConfig.Provider.GCP.ServiceAccountKeyPath)
+assert.Equal(gcpServiceAccountKeyFile, readConfig.Provider.GCP.ServiceAccountKeyPath)
 }
 require.NoError(err)
 assert.True(tc.creator.createCalled)
 assert.Equal(tc.creator.id.GCPOutput, validIAMIDFile.GCPOutput)
 readServiceAccountKey := &map[string]string{}
-readErr := fileHandler.ReadJSON(constants.GCPServiceAccountKeyFile, readServiceAccountKey)
+readErr := fileHandler.ReadJSON(gcpServiceAccountKeyFile, readServiceAccountKey)
 require.NoError(readErr)
 assert.Equal("not_a_secret", (*readServiceAccountKey)["private_key_id"])
 })
@@ -939,7 +802,7 @@ func createFSWithConfig(cfg config.Config) func(require *require.Assertions, pro
 fs := afero.NewMemMapFs()
 fileHandler := file.NewHandler(fs)
 for _, f := range existingConfigFiles {
-require.NoError(fileHandler.WriteYAML(f, cfg, file.OptNone))
+require.NoError(fileHandler.WriteYAML(f, cfg, file.OptMkdirAll))
 }
 for _, d := range existingDirs {
 require.NoError(fs.MkdirAll(d, 0o755))

@@ -41,10 +41,7 @@ func runIAMDestroy(cmd *cobra.Command, _ []string) error {
 }
 defer log.Sync()
 spinner := newSpinner(cmd.ErrOrStderr())
-destroyer, err := cloudcmd.NewIAMDestroyer(cmd.Context())
+destroyer := cloudcmd.NewIAMDestroyer()
-if err != nil {
-return err
-}
 fsHandler := file.NewHandler(afero.NewOsFs())

 c := &destroyCmd{log: log}
@@ -63,27 +60,27 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr
 }

 // check if there is a possibility that the cluster is still running by looking out for specific files
-c.log.Debugf("Checking if %q exists", constants.AdminConfFilename)
+c.log.Debugf("Checking if %q exists", adminConfPath(flags.workspace))
 _, err = fsHandler.Stat(constants.AdminConfFilename)
 if !errors.Is(err, os.ErrNotExist) {
-return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", constants.AdminConfFilename)
+return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", adminConfPath(flags.workspace))
 }
-c.log.Debugf("Checking if %q exists", constants.ClusterIDsFileName)
+c.log.Debugf("Checking if %q exists", clusterIDsPath(flags.workspace))
-_, err = fsHandler.Stat(constants.ClusterIDsFileName)
+_, err = fsHandler.Stat(constants.ClusterIDsFilename)
 if !errors.Is(err, os.ErrNotExist) {
-return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", constants.ClusterIDsFileName)
+return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", clusterIDsPath(flags.workspace))
 }

 gcpFileExists := false

-c.log.Debugf("Checking if %q exists", constants.GCPServiceAccountKeyFile)
+c.log.Debugf("Checking if %q exists", gcpServiceAccountKeyPath(flags.workspace))
-_, err = fsHandler.Stat(constants.GCPServiceAccountKeyFile)
+_, err = fsHandler.Stat(gcpServiceAccountKeyFile)
 if err != nil {
 if !errors.Is(err, os.ErrNotExist) {
 return err
 }
 } else {
-c.log.Debugf("%q exists", constants.GCPServiceAccountKeyFile)
+c.log.Debugf("%q exists", gcpServiceAccountKeyPath(flags.workspace))
 gcpFileExists = true
 }

@@ -91,7 +88,7 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr
 // Confirmation
 confirmString := "Do you really want to destroy your IAM configuration? Note that this will remove all resources in the resource group."
 if gcpFileExists {
-confirmString += fmt.Sprintf("\nThis will also delete %q", constants.GCPServiceAccountKeyFile)
+confirmString += fmt.Sprintf("\nThis will also delete %q", gcpServiceAccountKeyPath(flags.workspace))
 }
 ok, err := askToConfirm(cmd, confirmString)
 if err != nil {
@@ -104,8 +101,8 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr
 }

 if gcpFileExists {
-c.log.Debugf("Starting to delete %q", constants.GCPServiceAccountKeyFile)
+c.log.Debugf("Starting to delete %q", gcpServiceAccountKeyPath(flags.workspace))
-proceed, err := c.deleteGCPServiceAccountKeyFile(cmd, destroyer, fsHandler)
+proceed, err := c.deleteGCPServiceAccountKeyFile(cmd, destroyer, flags.workspace, fsHandler)
 if err != nil {
 return err
 }
@@ -119,7 +116,7 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr

 spinner.Start("Destroying IAM configuration", false)
 defer spinner.Stop()
-if err := destroyer.DestroyIAMConfiguration(cmd.Context(), flags.tfLogLevel); err != nil {
+if err := destroyer.DestroyIAMConfiguration(cmd.Context(), constants.TerraformIAMWorkingDir, flags.tfLogLevel); err != nil {
 return fmt.Errorf("destroying IAM configuration: %w", err)
 }

@@ -128,36 +125,37 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr
 return nil
 }

-func (c *destroyCmd) deleteGCPServiceAccountKeyFile(cmd *cobra.Command, destroyer iamDestroyer, fsHandler file.Handler) (bool, error) {
+func (c *destroyCmd) deleteGCPServiceAccountKeyFile(cmd *cobra.Command, destroyer iamDestroyer, workspace string, fsHandler file.Handler) (bool, error) {
 var fileSaKey gcpshared.ServiceAccountKey

-c.log.Debugf("Parsing %q", constants.GCPServiceAccountKeyFile)
+c.log.Debugf("Parsing %q", gcpServiceAccountKeyPath(workspace))
-if err := fsHandler.ReadJSON(constants.GCPServiceAccountKeyFile, &fileSaKey); err != nil {
+if err := fsHandler.ReadJSON(gcpServiceAccountKeyFile, &fileSaKey); err != nil {
 return false, err
 }

 c.log.Debugf("Getting service account key from the tfstate")
-tfSaKey, err := destroyer.GetTfstateServiceAccountKey(cmd.Context())
+tfSaKey, err := destroyer.GetTfStateServiceAccountKey(cmd.Context(), constants.TerraformIAMWorkingDir)
 if err != nil {
 return false, err
 }

 c.log.Debugf("Checking if keys are the same")
 if tfSaKey != fileSaKey {
-cmd.Printf("The key in %q don't match up with your Terraform state. %q will not be deleted.\n", constants.GCPServiceAccountKeyFile, constants.GCPServiceAccountKeyFile)
+cmd.Printf("The key in %q don't match up with your Terraform state. %q will not be deleted.\n", gcpServiceAccountKeyPath(workspace), gcpServiceAccountKeyPath(workspace))
 return true, nil
 }

-if err := fsHandler.Remove(constants.GCPServiceAccountKeyFile); err != nil {
+if err := fsHandler.Remove(gcpServiceAccountKeyFile); err != nil {
 return false, err
 }

-c.log.Debugf("Successfully deleted %q", constants.GCPServiceAccountKeyFile)
+c.log.Debugf("Successfully deleted %q", gcpServiceAccountKeyPath(workspace))
 return true, nil
 }

 type destroyFlags struct {
 yes bool
+workspace string
 tfLogLevel terraform.LogLevel
 }

@@ -169,6 +167,12 @@ func (c *destroyCmd) parseDestroyFlags(cmd *cobra.Command) (destroyFlags, error)
 }
 c.log.Debugf("Yes flag is %t", yes)

+workspace, err := cmd.Flags().GetString("workspace")
+if err != nil {
+return destroyFlags{}, fmt.Errorf("parsing workspace string: %w", err)
+}
+c.log.Debugf("Workspace set to %q", workspace)

 logLevelString, err := cmd.Flags().GetString("tf-log")
 if err != nil {
 return destroyFlags{}, fmt.Errorf("parsing tf-log string: %w", err)
@@ -177,10 +181,11 @@ func (c *destroyCmd) parseDestroyFlags(cmd *cobra.Command) (destroyFlags, error)
 if err != nil {
 return destroyFlags{}, fmt.Errorf("parsing Terraform log level %s: %w", logLevelString, err)
 }
-c.log.Debugf("Terraform logs will be written into %s at level %s", constants.TerraformLogFile, logLevel.String())
+c.log.Debugf("Terraform logs will be written into %s at level %s", terraformLogPath(workspace), logLevel.String())

 return destroyFlags{
 tfLogLevel: logLevel,
+workspace: workspace,
 yes: yes,
 }, nil
 }
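
The IAM destroyer is now constructed without a context, and every call that touches Terraform state names the IAM working directory explicitly. A hedged usage sketch based only on the calls visible in the hunks above; the example function and the cloudcmd/terraform import paths are assumptions, and the log level is just a placeholder taken from elsewhere in the diff:

// Hedged sketch: illustrates the call order of the new explicit-working-dir API.
package cmd

import (
	"context"
	"fmt"

	"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"   // assumed import path
	"github.com/edgelesssys/constellation/v2/cli/internal/terraform" // assumed import path
	"github.com/edgelesssys/constellation/v2/internal/constants"
)

// destroyIAMExample is a hypothetical helper, not part of the commit.
func destroyIAMExample(ctx context.Context) error {
	destroyer := cloudcmd.NewIAMDestroyer()

	// Read the GCP service account key recorded in the Terraform state of the IAM working directory.
	if _, err := destroyer.GetTfStateServiceAccountKey(ctx, constants.TerraformIAMWorkingDir); err != nil {
		return fmt.Errorf("reading service account key from Terraform state: %w", err)
	}

	// Destroy the IAM resources tracked in that working directory.
	if err := destroyer.DestroyIAMConfiguration(ctx, constants.TerraformIAMWorkingDir, terraform.LogLevelDebug); err != nil {
		return fmt.Errorf("destroying IAM configuration: %w", err)
	}
	return nil
}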

@@ -24,7 +24,7 @@ func TestIAMDestroy(t *testing.T) {

 newFsExists := func() file.Handler {
 fh := file.NewHandler(afero.NewMemMapFs())
-require.NoError(fh.Write(constants.GCPServiceAccountKeyFile, []byte("{}")))
+require.NoError(fh.Write(gcpServiceAccountKeyFile, []byte("{}")))
 return fh
 }
 newFsMissing := func() file.Handler {
@@ -38,7 +38,7 @@ func TestIAMDestroy(t *testing.T) {
 }
 newFsWithClusterIDFile := func() file.Handler {
 fh := file.NewHandler(afero.NewMemMapFs())
-require.NoError(fh.Write(constants.ClusterIDsFileName, []byte("")))
+require.NoError(fh.Write(constants.ClusterIDsFilename, []byte("")))
 return fh
 }

@@ -92,7 +92,7 @@ func TestIAMDestroy(t *testing.T) {
 "gcp delete error": {
 fh: newFsExists(),
 yesFlag: "true",
-iamDestroyer: &stubIAMDestroyer{getTfstateKeyErr: someError},
+iamDestroyer: &stubIAMDestroyer{getTfStateKeyErr: someError},
 wantErr: true,
 },
 }
@@ -108,6 +108,7 @@ func TestIAMDestroy(t *testing.T) {

 // register persistent flags manually
 cmd.Flags().String("tf-log", "NONE", "")
+cmd.Flags().String("workspace", "", "")

 assert.NoError(cmd.Flags().Set("yes", tc.yesFlag))

@@ -146,12 +147,12 @@ func TestDeleteGCPServiceAccountKeyFile(t *testing.T) {

 newFs := func() file.Handler {
 fs := file.NewHandler(afero.NewMemMapFs())
-require.NoError(fs.Write(constants.GCPServiceAccountKeyFile, []byte(gcpFile)))
+require.NoError(fs.Write(gcpServiceAccountKeyFile, []byte(gcpFile)))
 return fs
 }
 newFsInvalidJSON := func() file.Handler {
 fh := file.NewHandler(afero.NewMemMapFs())
-require.NoError(fh.Write(constants.GCPServiceAccountKeyFile, []byte("asdf")))
+require.NoError(fh.Write(gcpServiceAccountKeyFile, []byte("asdf")))
 return fh
 }

@@ -169,7 +170,7 @@ func TestDeleteGCPServiceAccountKeyFile(t *testing.T) {
 wantErr: true,
 },
 "error getting key terraform": {
-destroyer: &stubIAMDestroyer{getTfstateKeyErr: someError},
+destroyer: &stubIAMDestroyer{getTfStateKeyErr: someError},
 fsHandler: newFs(),
 wantErr: true,
 wantGetSaKeyCalled: true,
@@ -201,7 +202,7 @@ func TestDeleteGCPServiceAccountKeyFile(t *testing.T) {

 c := &destroyCmd{log: logger.NewTest(t)}

-proceed, err := c.deleteGCPServiceAccountKeyFile(cmd, tc.destroyer, tc.fsHandler)
+proceed, err := c.deleteGCPServiceAccountKeyFile(cmd, tc.destroyer, "", tc.fsHandler)
 if tc.wantErr {
 assert.Error(err)
 } else {
@@ -209,7 +210,7 @@ func TestDeleteGCPServiceAccountKeyFile(t *testing.T) {
 }

 assert.Equal(tc.wantProceed, proceed)
-assert.Equal(tc.wantGetSaKeyCalled, tc.destroyer.getTfstateKeyCalled)
+assert.Equal(tc.wantGetSaKeyCalled, tc.destroyer.getTfStateKeyCalled)
 })
 }
 }

@@ -16,6 +16,7 @@ import (
 "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
 "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
 "github.com/edgelesssys/constellation/v2/internal/config"
+"github.com/edgelesssys/constellation/v2/internal/constants"
 "github.com/edgelesssys/constellation/v2/internal/file"
 "github.com/google/uuid"
 "github.com/spf13/afero"
@@ -50,23 +51,18 @@ func newIAMUpgradeApplyCmd() *cobra.Command {
 Args: cobra.NoArgs,
 RunE: runIAMUpgradeApply,
 }
-cmd.Flags().BoolP("yes", "y", false, "run upgrades without further confirmation\n")
+cmd.Flags().BoolP("yes", "y", false, "run upgrades without further confirmation")
 return cmd
 }

 func runIAMUpgradeApply(cmd *cobra.Command, _ []string) error {
-configPath, err := cmd.Flags().GetString("config")
-if err != nil {
-return err
-}

 force, err := cmd.Flags().GetBool("force")
 if err != nil {
 return fmt.Errorf("parsing force argument: %w", err)
 }
 fileHandler := file.NewHandler(afero.NewOsFs())
 configFetcher := attestationconfigapi.NewFetcher()
-conf, err := config.New(fileHandler, configPath, configFetcher, force)
+conf, err := config.New(fileHandler, constants.ConfigFilename, configFetcher, force)
 var configValidationErr *config.ValidationError
 if errors.As(err, &configValidationErr) {
 cmd.PrintErrln(configValidationErr.LongMessage())
@@ -75,7 +71,7 @@ func runIAMUpgradeApply(cmd *cobra.Command, _ []string) error {
 return err
 }
 upgradeID := "iam-" + time.Now().Format("20060102150405") + "-" + strings.Split(uuid.New().String(), "-")[0]
-iamMigrateCmd, err := upgrade.NewIAMMigrateCmd(cmd.Context(), upgradeID, conf.GetProvider(), terraform.LogLevelDebug)
+iamMigrateCmd, err := upgrade.NewIAMMigrateCmd(cmd.Context(), constants.TerraformIAMWorkingDir, constants.UpgradeDir, upgradeID, conf.GetProvider(), terraform.LogLevelDebug)
 if err != nil {
 return fmt.Errorf("setting up IAM migration command: %w", err)
 }
@@ -90,7 +86,7 @@ func runIAMUpgradeApply(cmd *cobra.Command, _ []string) error {
 if err != nil {
 return err
 }
-err = migrator.applyMigration(cmd, file.NewHandler(afero.NewOsFs()), iamMigrateCmd, yes)
+err = migrator.applyMigration(cmd, constants.UpgradeDir, file.NewHandler(afero.NewOsFs()), iamMigrateCmd, yes)
 if err != nil {
 return fmt.Errorf("applying IAM migration: %w", err)
 }
|
@@ -15,6 +15,7 @@ import (
 "io"
 "net"
 "os"
+"path/filepath"
 "strconv"
 "sync"
 "text/tabwriter"
@@ -65,7 +66,6 @@ func NewInitCmd() *cobra.Command {
 Args: cobra.ExactArgs(0),
 RunE: runInitialize,
 }
-cmd.Flags().String("master-secret", "", "path to base64-encoded master secret")
 cmd.Flags().Bool("conformance", false, "enable conformance mode")
 cmd.Flags().Bool("skip-helm-wait", false, "install helm charts without waiting for deployments to be ready")
 cmd.Flags().Bool("merge-kubeconfig", false, "merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config")
@@ -76,9 +76,8 @@ type initCmd struct {
 log debugLog
 merger configMerger
 spinner spinnerInterf
-masterSecret uri.MasterSecret
 fileHandler file.Handler
-helmInstaller helm.Initializer
+helmInstaller initializer
 clusterShower clusterShower
 }

@@ -87,7 +86,7 @@ type clusterShower interface {
 }

 func newInitCmd(
-clusterShower clusterShower, helmInstaller helm.Initializer, fileHandler file.Handler,
+clusterShower clusterShower, helmInstaller initializer, fileHandler file.Handler,
 spinner spinnerInterf, merger configMerger, log debugLog,
 ) *initCmd {
 return &initCmd{
@@ -121,14 +120,15 @@ func runInitialize(cmd *cobra.Command, _ []string) error {
 ctx, cancel := context.WithTimeout(cmd.Context(), time.Hour)
 defer cancel()
 cmd.SetContext(ctx)
-helmInstaller, err := helm.NewInitializer(log)
-if err != nil {
-return fmt.Errorf("creating Helm installer: %w", err)
-}
 tfClient, err := terraform.New(ctx, constants.TerraformWorkingDir)
 if err != nil {
 return fmt.Errorf("creating Terraform client: %w", err)
 }
+helmInstaller, err := helm.NewInitializer(log, constants.AdminConfFilename)
+if err != nil {
+return fmt.Errorf("creating Helm installer: %w", err)
+}
 i := newInitCmd(tfClient, helmInstaller, fileHandler, spinner, &kubeconfigMerger{log: log}, log)
 fetcher := attestationconfigapi.NewFetcher()
 return i.initialize(cmd, newDialer, license.NewClient(), fetcher)
@@ -143,8 +143,8 @@ func (i *initCmd) initialize(cmd *cobra.Command, newDialer func(validator atls.V
 return err
 }
 i.log.Debugf("Using flags: %+v", flags)
-i.log.Debugf("Loading configuration file from %q", flags.configPath)
-conf, err := config.New(i.fileHandler, flags.configPath, configFetcher, flags.force)
+i.log.Debugf("Loading configuration file from %q", configPath(flags.workspace))
+conf, err := config.New(i.fileHandler, constants.ConfigFilename, configFetcher, flags.force)
 var configValidationErr *config.ValidationError
 if errors.As(err, &configValidationErr) {
 cmd.PrintErrln(configValidationErr.LongMessage())
@@ -163,7 +163,7 @@ func (i *initCmd) initialize(cmd *cobra.Command, newDialer func(validator atls.V

 i.log.Debugf("Checking cluster ID file")
 var idFile clusterid.File
-if err := i.fileHandler.ReadJSON(constants.ClusterIDsFileName, &idFile); err != nil {
+if err := i.fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil {
 return fmt.Errorf("reading cluster ID file: %w", err)
 }

@@ -193,15 +193,14 @@ func (i *initCmd) initialize(cmd *cobra.Command, newDialer func(validator atls.V
 return fmt.Errorf("creating new validator: %w", err)
 }
 i.log.Debugf("Created a new validator")
-serviceAccURI, err := i.getMarshaledServiceAccountURI(provider, conf)
+serviceAccURI, err := i.getMarshaledServiceAccountURI(provider, conf, flags.workspace)
 if err != nil {
 return err
 }
 i.log.Debugf("Successfully marshaled service account URI")
-masterSecret, err := i.readOrGenerateMasterSecret(cmd.OutOrStdout(), flags.masterSecretPath)
-i.masterSecret = masterSecret
+masterSecret, err := i.generateMasterSecret(cmd.OutOrStdout(), flags.workspace)
 if err != nil {
-return fmt.Errorf("parsing or generating master secret from file %s: %w", flags.masterSecretPath, err)
+return fmt.Errorf("generating master secret: %w", err)
 }

 clusterName := clusterid.GetClusterName(conf, idFile)
@@ -239,8 +238,7 @@ func (i *initCmd) initialize(cmd *cobra.Command, newDialer func(validator atls.V
 idFile.CloudProvider = provider

 bufferedOutput := &bytes.Buffer{}
-err = i.writeOutput(idFile, resp, flags.mergeConfigs, bufferedOutput)
-if err != nil {
+if err := i.writeOutput(idFile, resp, flags.mergeConfigs, bufferedOutput, flags.workspace); err != nil {
 return err
 }

@@ -388,7 +386,8 @@ func (d *initDoer) handleGRPCStateChanges(ctx context.Context, wg *sync.WaitGrou
 }

 func (i *initCmd) writeOutput(
-idFile clusterid.File, initResp *initproto.InitSuccessResponse, mergeConfig bool, wr io.Writer,
+idFile clusterid.File, initResp *initproto.InitSuccessResponse,
+mergeConfig bool, wr io.Writer, workspace string,
 ) error {
 fmt.Fprint(wr, "Your Constellation cluster was successfully initialized.\n\n")

@@ -399,14 +398,14 @@ func (i *initCmd) writeOutput(
 tw := tabwriter.NewWriter(wr, 0, 0, 2, ' ', 0)
 // writeRow(tw, "Constellation cluster's owner identifier", ownerID)
 writeRow(tw, "Constellation cluster identifier", clusterID)
-writeRow(tw, "Kubernetes configuration", constants.AdminConfFilename)
+writeRow(tw, "Kubernetes configuration", adminConfPath(workspace))
 tw.Flush()
 fmt.Fprintln(wr)

 if err := i.fileHandler.Write(constants.AdminConfFilename, initResp.GetKubeconfig(), file.OptNone); err != nil {
 return fmt.Errorf("writing kubeconfig: %w", err)
 }
-i.log.Debugf("Kubeconfig written to %s", constants.AdminConfFilename)
+i.log.Debugf("Kubeconfig written to %s", adminConfPath(workspace))

 if mergeConfig {
 if err := i.merger.mergeConfigs(constants.AdminConfFilename, i.fileHandler); err != nil {
@@ -420,14 +419,14 @@ func (i *initCmd) writeOutput(
 idFile.OwnerID = ownerID
 idFile.ClusterID = clusterID

-if err := i.fileHandler.WriteJSON(constants.ClusterIDsFileName, idFile, file.OptOverwrite); err != nil {
+if err := i.fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptOverwrite); err != nil {
 return fmt.Errorf("writing Constellation ID file: %w", err)
 }
-i.log.Debugf("Constellation ID file written to %s", constants.ClusterIDsFileName)
+i.log.Debugf("Constellation ID file written to %s", clusterIDsPath(workspace))

 if !mergeConfig {
 fmt.Fprintln(wr, "You can now connect to your cluster by executing:")
-fmt.Fprintf(wr, "\texport KUBECONFIG=\"$PWD/%s\"\n", constants.AdminConfFilename)
+fmt.Fprintf(wr, "\texport KUBECONFIG=\"$PWD/%s\"\n", adminConfPath(workspace))
 } else {
 fmt.Fprintln(wr, "Constellation kubeconfig merged with default config.")

@@ -448,11 +447,6 @@ func writeRow(wr io.Writer, col1 string, col2 string) {
 // evalFlagArgs gets the flag values and does preprocessing of these values like
 // reading the content from file path flags and deriving other values from flag combinations.
 func (i *initCmd) evalFlagArgs(cmd *cobra.Command) (initFlags, error) {
-masterSecretPath, err := cmd.Flags().GetString("master-secret")
-if err != nil {
-return initFlags{}, fmt.Errorf("parsing master-secret path flag: %w", err)
-}
-i.log.Debugf("Master secret path flag value is %q", masterSecretPath)
 conformance, err := cmd.Flags().GetBool("conformance")
 if err != nil {
 return initFlags{}, fmt.Errorf("parsing conformance flag: %w", err)
@@ -467,7 +461,7 @@ func (i *initCmd) evalFlagArgs(cmd *cobra.Command) (initFlags, error) {
 helmWaitMode = helm.WaitModeNone
 }
 i.log.Debugf("Helm wait flag is %t", skipHelmWait)
-configPath, err := cmd.Flags().GetString("config")
+workspace, err := cmd.Flags().GetString("workspace")
 if err != nil {
 return initFlags{}, fmt.Errorf("parsing config path flag: %w", err)
 }
@@ -485,43 +479,25 @@ func (i *initCmd) evalFlagArgs(cmd *cobra.Command) (initFlags, error) {
 i.log.Debugf("force flag is %t", force)

 return initFlags{
-configPath: configPath,
+workspace: workspace,
 conformance: conformance,
 helmWaitMode: helmWaitMode,
-masterSecretPath: masterSecretPath,
-force: force,
-mergeConfigs: mergeConfigs,
+force: force,
+mergeConfigs: mergeConfigs,
 }, nil
 }

 // initFlags are the resulting values of flag preprocessing.
 type initFlags struct {
-configPath string
-masterSecretPath string
-conformance bool
-helmWaitMode helm.WaitMode
-force bool
-mergeConfigs bool
+workspace string
+conformance bool
+helmWaitMode helm.WaitMode
+force bool
+mergeConfigs bool
 }

 // readOrGenerateMasterSecret reads a base64 encoded master secret from file or generates a new 32 byte secret.
-func (i *initCmd) readOrGenerateMasterSecret(outWriter io.Writer, filename string) (uri.MasterSecret, error) {
-if filename != "" {
-i.log.Debugf("Reading master secret from file %q", filename)
-var secret uri.MasterSecret
-if err := i.fileHandler.ReadJSON(filename, &secret); err != nil {
-return uri.MasterSecret{}, err
-}
-
-if len(secret.Key) < crypto.MasterSecretLengthMin {
-return uri.MasterSecret{}, fmt.Errorf("provided master secret is smaller than the required minimum of %d Bytes", crypto.MasterSecretLengthMin)
-}
-if len(secret.Salt) < crypto.RNGLengthDefault {
-return uri.MasterSecret{}, fmt.Errorf("provided salt is smaller than the required minimum of %d Bytes", crypto.RNGLengthDefault)
-}
-return secret, nil
-}
-
+func (i *initCmd) generateMasterSecret(outWriter io.Writer, workspace string) (uri.MasterSecret, error) {
 // No file given, generate a new secret, and save it to disk
 i.log.Debugf("Generating new master secret")
 key, err := crypto.GenerateRandomBytes(crypto.MasterSecretLengthDefault)
@@ -540,21 +516,21 @@ func (i *initCmd) readOrGenerateMasterSecret(outWriter io.Writer, filename strin
 if err := i.fileHandler.WriteJSON(constants.MasterSecretFilename, secret, file.OptNone); err != nil {
 return uri.MasterSecret{}, err
 }
-fmt.Fprintf(outWriter, "Your Constellation master secret was successfully written to ./%s\n", constants.MasterSecretFilename)
+fmt.Fprintf(outWriter, "Your Constellation master secret was successfully written to %q\n", masterSecretPath(workspace))
 return secret, nil
 }

-func (i *initCmd) getMarshaledServiceAccountURI(provider cloudprovider.Provider, config *config.Config) (string, error) {
+func (i *initCmd) getMarshaledServiceAccountURI(provider cloudprovider.Provider, config *config.Config, workspace string,
+) (string, error) {
 i.log.Debugf("Getting service account URI")
 switch provider {
 case cloudprovider.GCP:
 i.log.Debugf("Handling case for GCP")
-path := config.Provider.GCP.ServiceAccountKeyPath
-i.log.Debugf("GCP service account key path %s", path)
+i.log.Debugf("GCP service account key path %s", filepath.Join(workspace, config.Provider.GCP.ServiceAccountKeyPath))

 var key gcpshared.ServiceAccountKey
-if err := i.fileHandler.ReadJSON(path, &key); err != nil {
-return "", fmt.Errorf("reading service account key from path %q: %w", path, err)
+if err := i.fileHandler.ReadJSON(config.Provider.GCP.ServiceAccountKeyPath, &key); err != nil {
+return "", fmt.Errorf("reading service account key from path %q: %w", filepath.Join(workspace, config.Provider.GCP.ServiceAccountKeyPath), err)
 }
 i.log.Debugf("Read GCP service account key from path")
 return key.ToCloudServiceAccountURI(), nil
@@ -667,3 +643,7 @@ func (e *nonRetriableError) Error() string {
 func (e *nonRetriableError) Unwrap() error {
 return e.err
 }
+
+type initializer interface {
+Install(ctx context.Context, releases *helm.Releases) error
+}
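The init hunks above (and several hunks below) print paths through helpers such as adminConfPath, configPath, clusterIDsPath, masterSecretPath, and terraformLogPath, whose definitions are not part of this excerpt. A plausible sketch, under the assumption that these helpers do nothing more than join the workspace base directory with the well-known file names from the constants package:

package cmd

import (
	"path/filepath"

	"github.com/edgelesssys/constellation/v2/internal/constants"
)

// adminConfPath returns the workspace-qualified path of the Kubernetes admin config.
func adminConfPath(workspace string) string {
	return filepath.Join(workspace, constants.AdminConfFilename)
}

// configPath returns the workspace-qualified path of the Constellation config file.
func configPath(workspace string) string {
	return filepath.Join(workspace, constants.ConfigFilename)
}

// clusterIDsPath returns the workspace-qualified path of the cluster ID file.
func clusterIDsPath(workspace string) string {
	return filepath.Join(workspace, constants.ClusterIDsFilename)
}

// masterSecretPath returns the workspace-qualified path of the master secret file.
func masterSecretPath(workspace string) string {
	return filepath.Join(workspace, constants.MasterSecretFilename)
}

// terraformLogPath returns the workspace-qualified path of the Terraform log file.
func terraformLogPath(workspace string) string {
	return filepath.Join(workspace, constants.TerraformLogFile)
}

With an empty workspace string, filepath.Join collapses each result to the bare file name, which matches the output users see when they keep the default workspace.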
@@ -162,8 +162,8 @@ func TestInitialize(t *testing.T) {
 cmd.SetErr(&errOut)

 // Flags
-cmd.Flags().String("config", constants.ConfigFilename, "") // register persistent flag manually
+cmd.Flags().String("workspace", "", "") // register persistent flag manually
 cmd.Flags().Bool("force", true, "") // register persistent flag manually

 // File system preparation
 fs := afero.NewMemMapFs()
@@ -175,7 +175,7 @@ func TestInitialize(t *testing.T) {
 require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, config, file.OptNone))
 if tc.idFile != nil {
 tc.idFile.CloudProvider = tc.provider
-require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFileName, tc.idFile, file.OptNone))
+require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, tc.idFile, file.OptNone))
 }
 if tc.serviceAccKey != nil {
 require.NoError(fileHandler.WriteJSON(serviceAccPath, tc.serviceAccKey, file.OptNone))
@@ -301,9 +301,8 @@ func TestWriteOutput(t *testing.T) {
 UID: "test-uid",
 IP: "cluster-ip",
 }

 i := newInitCmd(nil, nil, fileHandler, nil, &stubMerger{}, logger.NewTest(t))
-err := i.writeOutput(idFile, resp.GetInitSuccess(), false, &out)
+err := i.writeOutput(idFile, resp.GetInitSuccess(), false, &out, "")
 require.NoError(err)
 // assert.Contains(out.String(), ownerID)
 assert.Contains(out.String(), clusterID)
@@ -314,29 +313,39 @@ func TestWriteOutput(t *testing.T) {
 assert.NoError(err)
 assert.Equal(string(resp.GetInitSuccess().GetKubeconfig()), string(adminConf))

-idsFile, err := afs.ReadFile(constants.ClusterIDsFileName)
+idsFile, err := afs.ReadFile(constants.ClusterIDsFilename)
 assert.NoError(err)
 var testIDFile clusterid.File
 err = json.Unmarshal(idsFile, &testIDFile)
 assert.NoError(err)
 assert.Equal(expectedIDFile, testIDFile)

-// test config merging
 out.Reset()
 require.NoError(afs.Remove(constants.AdminConfFilename))
-err = i.writeOutput(idFile, resp.GetInitSuccess(), true, &out)
+// test custom workspace
+err = i.writeOutput(idFile, resp.GetInitSuccess(), true, &out, "some/path")
+require.NoError(err)
+// assert.Contains(out.String(), ownerID)
+assert.Contains(out.String(), clusterID)
+assert.Contains(out.String(), adminConfPath("some/path"))
+out.Reset()
+// File is written to current working dir, we simply pass the workspace for generating readable user output
+require.NoError(afs.Remove(constants.AdminConfFilename))
+
+// test config merging
+err = i.writeOutput(idFile, resp.GetInitSuccess(), true, &out, "")
 require.NoError(err)
 // assert.Contains(out.String(), ownerID)
 assert.Contains(out.String(), clusterID)
 assert.Contains(out.String(), constants.AdminConfFilename)
 assert.Contains(out.String(), "Constellation kubeconfig merged with default config")
 assert.Contains(out.String(), "You can now connect to your cluster")
+out.Reset()
+require.NoError(afs.Remove(constants.AdminConfFilename))

 // test config merging with env vars set
 i.merger = &stubMerger{envVar: "/some/path/to/kubeconfig"}
-out.Reset()
-require.NoError(afs.Remove(constants.AdminConfFilename))
-err = i.writeOutput(idFile, resp.GetInitSuccess(), true, &out)
+err = i.writeOutput(idFile, resp.GetInitSuccess(), true, &out, "")
 require.NoError(err)
 // assert.Contains(out.String(), ownerID)
 assert.Contains(out.String(), clusterID)
@@ -345,79 +354,29 @@ func TestWriteOutput(t *testing.T) {
 assert.Contains(out.String(), "Warning: KUBECONFIG environment variable is set")
 }

-func TestReadOrGenerateMasterSecret(t *testing.T) {
+func TestGenerateMasterSecret(t *testing.T) {
 testCases := map[string]struct {
-filename string
 createFileFunc func(handler file.Handler) error
 fs func() afero.Fs
 wantErr bool
 }{
-"file with secret exists": {
-filename: "someSecret",
-fs: afero.NewMemMapFs,
+"file already exists": {
+fs: afero.NewMemMapFs,
 createFileFunc: func(handler file.Handler) error {
 return handler.WriteJSON(
-"someSecret",
+constants.MasterSecretFilename,
 uri.MasterSecret{Key: []byte("constellation-master-secret"), Salt: []byte("constellation-32Byte-length-salt")},
 file.OptNone,
 )
 },
-wantErr: false,
+wantErr: true,
 },
-"no file given": {
-filename: "",
+"file does not exist": {
 createFileFunc: func(handler file.Handler) error { return nil },
 fs: afero.NewMemMapFs,
 wantErr: false,
 },
-"file does not exist": {
-filename: "nonExistingSecret",
-createFileFunc: func(handler file.Handler) error { return nil },
-fs: afero.NewMemMapFs,
-wantErr: true,
-},
-"file is empty": {
-filename: "emptySecret",
-createFileFunc: func(handler file.Handler) error {
-return handler.Write("emptySecret", []byte{}, file.OptNone)
-},
-fs: afero.NewMemMapFs,
-wantErr: true,
-},
-"salt too short": {
-filename: "shortSecret",
-createFileFunc: func(handler file.Handler) error {
-return handler.WriteJSON(
-"shortSecret",
-uri.MasterSecret{Key: []byte("constellation-master-secret"), Salt: []byte("short")},
-file.OptNone,
-)
-},
-fs: afero.NewMemMapFs,
-wantErr: true,
-},
-"key too short": {
-filename: "shortSecret",
-createFileFunc: func(handler file.Handler) error {
-return handler.WriteJSON(
-"shortSecret",
-uri.MasterSecret{Key: []byte("short"), Salt: []byte("constellation-32Byte-length-salt")},
-file.OptNone,
-)
-},
-fs: afero.NewMemMapFs,
-wantErr: true,
-},
-"invalid file content": {
-filename: "unencodedSecret",
-createFileFunc: func(handler file.Handler) error {
-return handler.Write("unencodedSecret", []byte("invalid-constellation-master-secret"), file.OptNone)
-},
-fs: afero.NewMemMapFs,
-wantErr: true,
-},
 "file not writeable": {
-filename: "",
 createFileFunc: func(handler file.Handler) error { return nil },
 fs: func() afero.Fs { return afero.NewReadOnlyFs(afero.NewMemMapFs()) },
 wantErr: true,
@@ -434,21 +393,17 @@ func TestReadOrGenerateMasterSecret(t *testing.T) {

 var out bytes.Buffer
 i := newInitCmd(nil, nil, fileHandler, nil, nil, logger.NewTest(t))
-secret, err := i.readOrGenerateMasterSecret(&out, tc.filename)
+secret, err := i.generateMasterSecret(&out, "")

 if tc.wantErr {
 assert.Error(err)
 } else {
 assert.NoError(err)

-if tc.filename == "" {
-require.Contains(out.String(), constants.MasterSecretFilename)
-filename := strings.Split(out.String(), "./")
-tc.filename = strings.Trim(filename[1], "\n")
-}
+require.Contains(out.String(), constants.MasterSecretFilename)

 var masterSecret uri.MasterSecret
-require.NoError(fileHandler.ReadJSON(tc.filename, &masterSecret))
+require.NoError(fileHandler.ReadJSON(constants.MasterSecretFilename, &masterSecret))
 assert.Equal(masterSecret.Key, secret.Key)
 assert.Equal(masterSecret.Salt, secret.Salt)
 }
@@ -491,8 +446,8 @@ func TestAttestation(t *testing.T) {
 defer initServer.GracefulStop()

 cmd := NewInitCmd()
-cmd.Flags().String("config", constants.ConfigFilename, "") // register persistent flag manually
+cmd.Flags().String("workspace", "", "") // register persistent flag manually
 cmd.Flags().Bool("force", true, "") // register persistent flag manually
 var out bytes.Buffer
 cmd.SetOut(&out)
 var errOut bytes.Buffer
@@ -500,7 +455,7 @@ func TestAttestation(t *testing.T) {

 fs := afero.NewMemMapFs()
 fileHandler := file.NewHandler(fs)
-require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFileName, existingIDFile, file.OptNone))
+require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, existingIDFile, file.OptNone))

 cfg := config.Default()
 cfg.Image = "v0.0.0" // is the default version of the the CLI (before build injects the real version)
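The added test comment above ("File is written to current working dir, we simply pass the workspace for generating readable user output") captures the design split used throughout these hunks: file I/O keeps addressing the bare file names from the constants package, which resolve relative to the process working directory, while the workspace string only feeds user-facing output. A small illustrative sketch of that split, reusing identifiers that appear in the diff; the helper itself and its surrounding imports (fmt, io, file, constants) are assumed for illustration and are not part of the commit:

// writeKubeconfigSketch writes the kubeconfig relative to the current working
// directory but prints a workspace-qualified path for the user.
func writeKubeconfigSketch(fileHandler file.Handler, wr io.Writer, kubeconfig []byte, workspace string) error {
	if err := fileHandler.Write(constants.AdminConfFilename, kubeconfig, file.OptNone); err != nil {
		return fmt.Errorf("writing kubeconfig: %w", err)
	}
	fmt.Fprintf(wr, "Kubernetes configuration: %s\n", adminConfPath(workspace))
	return nil
}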
@@ -45,7 +45,7 @@ func runDown(cmd *cobra.Command, args []string) error {

 func checkForMiniCluster(fileHandler file.Handler) error {
 var idFile clusterid.File
-if err := fileHandler.ReadJSON(constants.ClusterIDsFileName, &idFile); err != nil {
+if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil {
 return err
 }
 if idFile.CloudProvider != cloudprovider.QEMU {
@@ -39,8 +39,6 @@ func newMiniUpCmd() *cobra.Command {
 RunE: runUp,
 }

-// override global flag so we don't have a default value for the config
-cmd.Flags().String("config", "", "path to the configuration file to use for the cluster")
 cmd.Flags().Bool("merge-kubeconfig", true, "merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config")

 return cmd
@@ -88,7 +86,7 @@ func (m *miniUpCmd) up(cmd *cobra.Command, creator cloudCreator, spinner spinner

 // create cluster
 spinner.Start("Creating cluster in QEMU ", false)
-err = m.createMiniCluster(cmd.Context(), fileHandler, creator, config, flags.tfLogLevel)
+err = m.createMiniCluster(cmd.Context(), fileHandler, creator, config, flags)
 spinner.Stop()
 if err != nil {
 return fmt.Errorf("creating cluster: %w", err)
@@ -112,38 +110,28 @@ func (m *miniUpCmd) up(cmd *cobra.Command, creator cloudCreator, spinner spinner

 // prepareConfig reads a given config, or creates a new minimal QEMU config.
 func (m *miniUpCmd) prepareConfig(cmd *cobra.Command, fileHandler file.Handler, flags upFlags) (*config.Config, error) {
-// check for existing config
-if flags.configPath != "" {
-conf, err := config.New(fileHandler, flags.configPath, m.configFetcher, flags.force)
-var configValidationErr *config.ValidationError
-if errors.As(err, &configValidationErr) {
-cmd.PrintErrln(configValidationErr.LongMessage())
-}
-if err != nil {
-return nil, err
-}
-if conf.GetProvider() != cloudprovider.QEMU {
-return nil, errors.New("invalid provider for MiniConstellation cluster")
-}
-return conf, nil
-}
-m.log.Debugf("Configuration path is %q", flags.configPath)
-if err := cmd.Flags().Set("config", constants.ConfigFilename); err != nil {
-return nil, err
-}
 _, err := fileHandler.Stat(constants.ConfigFilename)
 if err == nil {
-// config already exists, prompt user to overwrite
-cmd.PrintErrln("A config file already exists in the current workspace. Use --config to use an existing config file.")
-ok, err := askToConfirm(cmd, "Do you want to overwrite it?")
+// config already exists, prompt user if they want to use this file
+cmd.PrintErrln("A config file already exists in the configured workspace.")
+ok, err := askToConfirm(cmd, "Do you want to create the Constellation using that config?")
 if err != nil {
 return nil, err
 }
+if ok {
+return m.prepareExistingConfig(cmd, fileHandler, flags)
+}
+
+// user declined to reuse config file, prompt if they want to overwrite it
+ok, err = askToConfirm(cmd, "Do you want to overwrite it and create a new config?")
+if err != nil {
+return nil, err
+}
 if !ok {
 return nil, errors.New("not overwriting existing config")
 }
 }

 if !featureset.CanUseEmbeddedMeasurmentsAndImage {
 cmd.PrintErrln("Generating a valid default config is not supported in the OSS build of the Constellation CLI. Consult the documentation for instructions on where to download the enterprise version.")
 return nil, errors.New("cannot create a mini cluster without a config file in the OSS build")
@@ -157,13 +145,29 @@ func (m *miniUpCmd) prepareConfig(cmd *cobra.Command, fileHandler file.Handler,
 return config, fileHandler.WriteYAML(constants.ConfigFilename, config, file.OptOverwrite)
 }

+func (m *miniUpCmd) prepareExistingConfig(cmd *cobra.Command, fileHandler file.Handler, flags upFlags) (*config.Config, error) {
+conf, err := config.New(fileHandler, constants.ConfigFilename, m.configFetcher, flags.force)
+var configValidationErr *config.ValidationError
+if errors.As(err, &configValidationErr) {
+cmd.PrintErrln(configValidationErr.LongMessage())
+}
+if err != nil {
+return nil, err
+}
+if conf.GetProvider() != cloudprovider.QEMU {
+return nil, errors.New("invalid provider for MiniConstellation cluster")
+}
+return conf, nil
+}
+
 // createMiniCluster creates a new cluster using the given config.
-func (m *miniUpCmd) createMiniCluster(ctx context.Context, fileHandler file.Handler, creator cloudCreator, config *config.Config, tfLogLevel terraform.LogLevel) error {
+func (m *miniUpCmd) createMiniCluster(ctx context.Context, fileHandler file.Handler, creator cloudCreator, config *config.Config, flags upFlags) error {
 m.log.Debugf("Creating mini cluster")
 opts := cloudcmd.CreateOptions{
 Provider: cloudprovider.QEMU,
 Config: config,
-TFLogLevel: tfLogLevel,
+TFWorkspace: constants.TerraformWorkingDir,
+TFLogLevel: flags.tfLogLevel,
 }
 idFile, err := creator.Create(ctx, opts)
 if err != nil {
@@ -172,7 +176,7 @@ func (m *miniUpCmd) createMiniCluster(ctx context.Context, fileHandler file.Hand

 idFile.UID = constants.MiniConstellationUID // use UID "mini" to identify MiniConstellation clusters.
 m.log.Debugf("Cluster id file contains %v", idFile)
-return fileHandler.WriteJSON(constants.ClusterIDsFileName, idFile, file.OptNone)
+return fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptNone)
 }

 // initializeMiniCluster initializes a QEMU cluster.
@@ -191,7 +195,6 @@ func (m *miniUpCmd) initializeMiniCluster(cmd *cobra.Command, fileHandler file.H
 return dialer.New(nil, validator, &net.Dialer{})
 }
 m.log.Debugf("Created new dialer")
-cmd.Flags().String("master-secret", "", "")
 cmd.Flags().String("endpoint", "", "")
 cmd.Flags().Bool("conformance", false, "")
 cmd.Flags().Bool("skip-helm-wait", false, "install helm charts without waiting for deployments to be ready")
@@ -202,7 +205,7 @@ func (m *miniUpCmd) initializeMiniCluster(cmd *cobra.Command, fileHandler file.H
 m.log.Debugf("Created new logger")
 defer log.Sync()

-helmInstaller, err := helm.NewInitializer(log)
+helmInstaller, err := helm.NewInitializer(log, constants.AdminConfFilename)
 if err != nil {
 return fmt.Errorf("creating Helm installer: %w", err)
 }
@@ -220,14 +223,13 @@ func (m *miniUpCmd) initializeMiniCluster(cmd *cobra.Command, fileHandler file.H
 }

 type upFlags struct {
-configPath string
 force bool
 tfLogLevel terraform.LogLevel
 }

 func (m *miniUpCmd) parseUpFlags(cmd *cobra.Command) (upFlags, error) {
 m.log.Debugf("Preparing configuration")
-configPath, err := cmd.Flags().GetString("config")
+workspace, err := cmd.Flags().GetString("workspace")
 if err != nil {
 return upFlags{}, fmt.Errorf("parsing config string: %w", err)
 }
@@ -246,10 +248,9 @@ func (m *miniUpCmd) parseUpFlags(cmd *cobra.Command) (upFlags, error) {
 if err != nil {
 return upFlags{}, fmt.Errorf("parsing Terraform log level %s: %w", logLevelString, err)
 }
-m.log.Debugf("Terraform logs will be written into %s at level %s", constants.TerraformLogFile, logLevel.String())
+m.log.Debugf("Terraform logs will be written into %s at level %s", terraformLogPath(workspace), logLevel.String())

 return upFlags{
-configPath: configPath,
 force: force,
 tfLogLevel: logLevel,
 }, nil
@@ -44,7 +44,6 @@ func NewRecoverCmd() *cobra.Command {
 RunE: runRecover,
 }
 cmd.Flags().StringP("endpoint", "e", "", "endpoint of the instance, passed as HOST[:PORT]")
-cmd.Flags().String("master-secret", constants.MasterSecretFilename, "path to master secret file")
 return cmd
 }

@@ -78,13 +77,13 @@ func (r *recoverCmd) recover(
 r.log.Debugf("Using flags: %+v", flags)

 var masterSecret uri.MasterSecret
-r.log.Debugf("Loading master secret file from %s", flags.secretPath)
-if err := fileHandler.ReadJSON(flags.secretPath, &masterSecret); err != nil {
+r.log.Debugf("Loading master secret file from %s", masterSecretPath(flags.workspace))
+if err := fileHandler.ReadJSON(constants.MasterSecretFilename, &masterSecret); err != nil {
 return err
 }

-r.log.Debugf("Loading configuration file from %q", flags.configPath)
-conf, err := config.New(fileHandler, flags.configPath, r.configFetcher, flags.force)
+r.log.Debugf("Loading configuration file from %q", configPath(flags.workspace))
+conf, err := config.New(fileHandler, constants.ConfigFilename, r.configFetcher, flags.force)
 var configValidationErr *config.ValidationError
 if errors.As(err, &configValidationErr) {
 cmd.PrintErrln(configValidationErr.LongMessage())
@@ -211,16 +210,21 @@ func (d *recoverDoer) setURIs(kmsURI, storageURI string) {
 }

 type recoverFlags struct {
 endpoint string
-secretPath string
-configPath string
-maaURL string
-force bool
+workspace string
+maaURL string
+force bool
 }

 func (r *recoverCmd) parseRecoverFlags(cmd *cobra.Command, fileHandler file.Handler) (recoverFlags, error) {
+workspace, err := cmd.Flags().GetString("workspace")
+if err != nil {
+return recoverFlags{}, fmt.Errorf("parsing config path argument: %w", err)
+}
+r.log.Debugf("Workspace set to %q", workspace)
+
 var idFile clusterid.File
-if err := fileHandler.ReadJSON(constants.ClusterIDsFileName, &idFile); err != nil && !errors.Is(err, afero.ErrFileNotFound) {
+if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil && !errors.Is(err, afero.ErrFileNotFound) {
 return recoverFlags{}, err
 }

@@ -237,16 +241,6 @@ func (r *recoverCmd) parseRecoverFlags(cmd *cobra.Command, fileHandler file.Hand
 return recoverFlags{}, fmt.Errorf("validating endpoint argument: %w", err)
 }
 r.log.Debugf("Endpoint value after parsing is %s", endpoint)
-masterSecretPath, err := cmd.Flags().GetString("master-secret")
-if err != nil {
-return recoverFlags{}, fmt.Errorf("parsing master-secret path argument: %w", err)
-}
-r.log.Debugf("Master secret flag is %s", masterSecretPath)
-configPath, err := cmd.Flags().GetString("config")
-if err != nil {
-return recoverFlags{}, fmt.Errorf("parsing config path argument: %w", err)
-}
-r.log.Debugf("Configuration path flag is %s", configPath)

 force, err := cmd.Flags().GetBool("force")
 if err != nil {
@@ -254,11 +248,10 @@ func (r *recoverCmd) parseRecoverFlags(cmd *cobra.Command, fileHandler file.Hand
 }

 return recoverFlags{
 endpoint: endpoint,
-secretPath: masterSecretPath,
-configPath: configPath,
-maaURL: idFile.AttestationURL,
-force: force,
+workspace: workspace,
+maaURL: idFile.AttestationURL,
+force: force,
 }, nil
 }

@@ -67,12 +67,12 @@ func TestRecover(t *testing.T) {
 lbErr := grpcstatus.Error(codes.Unavailable, `connection error: desc = "transport: authentication handshake failed: read tcp`)

 testCases := map[string]struct {
 doer *stubDoer
 masterSecret testvector.HKDF
 endpoint string
-configFlag string
-successfulCalls int
+successfulCalls int
+skipConfigCreation bool
 wantErr bool
 }{
 "works": {
 doer: &stubDoer{returns: []error{nil}},
@@ -81,11 +81,11 @@ func TestRecover(t *testing.T) {
 successfulCalls: 1,
 },
 "missing config": {
 doer: &stubDoer{returns: []error{nil}},
 endpoint: "192.0.2.89",
 masterSecret: testvector.HKDFZero,
-configFlag: "nonexistent-config",
+skipConfigCreation: true,
 wantErr: true,
 },
 "success multiple nodes": {
 doer: &stubDoer{returns: []error{nil, nil}},
@@ -139,22 +139,20 @@ func TestRecover(t *testing.T) {

 cmd := NewRecoverCmd()
 cmd.SetContext(context.Background())
-cmd.Flags().String("config", constants.ConfigFilename, "") // register persistent flag manually
+cmd.Flags().String("workspace", "", "") // register persistent flag manually
 cmd.Flags().Bool("force", true, "") // register persistent flag manually
 out := &bytes.Buffer{}
 cmd.SetOut(out)
 cmd.SetErr(out)
 require.NoError(cmd.Flags().Set("endpoint", tc.endpoint))

-if tc.configFlag != "" {
-require.NoError(cmd.Flags().Set("config", tc.configFlag))
-}
-
 fs := afero.NewMemMapFs()
 fileHandler := file.NewHandler(fs)

-config := defaultConfigWithExpectedMeasurements(t, config.Default(), cloudprovider.GCP)
-require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, config))
+if !tc.skipConfigCreation {
+config := defaultConfigWithExpectedMeasurements(t, config.Default(), cloudprovider.GCP)
+require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, config))
+}

 require.NoError(fileHandler.WriteJSON(
 "constellation-mastersecret.json",
@@ -193,15 +191,13 @@ func TestParseRecoverFlags(t *testing.T) {
 }{
 "no flags": {
 wantFlags: recoverFlags{
 endpoint: "192.0.2.42:9999",
-secretPath: "constellation-mastersecret.json",
 },
 writeIDFile: true,
 },
 "no flags, no ID file": {
 wantFlags: recoverFlags{
 endpoint: "192.0.2.42:9999",
-secretPath: "constellation-mastersecret.json",
 },
 wantErr: true,
 },
@@ -210,11 +206,10 @@ func TestParseRecoverFlags(t *testing.T) {
 wantErr: true,
 },
 "all args set": {
-args: []string{"-e", "192.0.2.42:2", "--config", "config-path", "--master-secret", "/path/super-secret.json"},
+args: []string{"-e", "192.0.2.42:2", "--workspace", "./constellation-workspace"},
 wantFlags: recoverFlags{
 endpoint: "192.0.2.42:2",
-secretPath: "/path/super-secret.json",
-configPath: "config-path",
+workspace: "./constellation-workspace",
 },
 },
 }
@@ -225,13 +220,13 @@ func TestParseRecoverFlags(t *testing.T) {
 require := require.New(t)

 cmd := NewRecoverCmd()
-cmd.Flags().String("config", "", "") // register persistent flag manually
+cmd.Flags().String("workspace", "", "") // register persistent flag manually
 cmd.Flags().Bool("force", false, "") // register persistent flag manually
 require.NoError(cmd.ParseFlags(tc.args))

 fileHandler := file.NewHandler(afero.NewMemMapFs())
 if tc.writeIDFile {
-require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFileName, &clusterid.File{IP: "192.0.2.42"}))
+require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, &clusterid.File{IP: "192.0.2.42"}))
 }
 r := &recoverCmd{log: logger.NewTest(t)}
 flags, err := r.parseRecoverFlags(cmd, fileHandler)
@@ -52,6 +52,11 @@ func runStatus(cmd *cobra.Command, _ []string) error {

 kubeClient := kubectl.New()

+flags, err := parseStatusFlags(cmd)
+if err != nil {
+return fmt.Errorf("parsing flags: %w", err)
+}
+
 fileHandler := file.NewHandler(afero.NewOsFs())
 kubeConfig, err := fileHandler.Read(constants.AdminConfFilename)
 if err != nil {
@@ -74,7 +79,9 @@ func runStatus(cmd *cobra.Command, _ []string) error {
 }

 // need helm client to fetch service versions.
-helmClient, err := helm.NewUpgradeClient(kubectl.New(), constants.AdminConfFilename, constants.HelmNamespace, log)
+// The client used here, doesn't need to know the current workspace.
+// It may be refactored in the future for easier usage.
+helmClient, err := helm.NewUpgradeClient(kubectl.New(), constants.UpgradeDir, constants.AdminConfFilename, constants.HelmNamespace, log)
 if err != nil {
 return fmt.Errorf("setting up helm client: %w", err)
 }
@@ -84,16 +91,8 @@ func runStatus(cmd *cobra.Command, _ []string) error {

 stableClient := kubernetes.NewStableClient(kubeClient)

-configPath, err := cmd.Flags().GetString("config")
-if err != nil {
-return fmt.Errorf("getting config flag: %w", err)
-}
-force, err := cmd.Flags().GetBool("force")
-if err != nil {
-return fmt.Errorf("getting config flag: %w", err)
-}
 fetcher := attestationconfigapi.NewFetcher()
-conf, err := config.New(fileHandler, configPath, fetcher, force)
+conf, err := config.New(fileHandler, constants.ConfigFilename, fetcher, flags.force)
 var configValidationErr *config.ValidationError
 if errors.As(err, &configValidationErr) {
 cmd.PrintErrln(configValidationErr.LongMessage())
@@ -213,6 +212,26 @@ func targetVersionsString(target kubernetes.TargetVersions) string {
 return builder.String()
 }

+type statusFlags struct {
+workspace string
+force bool
+}
+
+func parseStatusFlags(cmd *cobra.Command) (statusFlags, error) {
+workspace, err := cmd.Flags().GetString("workspace")
+if err != nil {
+return statusFlags{}, fmt.Errorf("getting config flag: %w", err)
+}
+force, err := cmd.Flags().GetBool("force")
+if err != nil {
+return statusFlags{}, fmt.Errorf("getting config flag: %w", err)
+}
+return statusFlags{
+workspace: workspace,
+force: force,
+}, nil
+}
+
 type kubeClient interface {
 GetNodes(ctx context.Context) ([]corev1.Node, error)
 }
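Note: commands read the persistent --workspace and --force flags through cmd.Flags(), and the unit tests in this commit register those flags manually on each subcommand. A minimal, hypothetical cobra sketch (not part of this commit; command and flag names are illustrative) shows why the lookup works when a subcommand runs through the root command:

    package main

    import (
        "fmt"

        "github.com/spf13/cobra"
    )

    func main() {
        root := &cobra.Command{Use: "example"}
        // A persistent flag defined on the root is inherited by every subcommand.
        root.PersistentFlags().String("workspace", "", "path to the workspace")

        status := &cobra.Command{
            Use: "status",
            RunE: func(cmd *cobra.Command, _ []string) error {
                // Works when executed through the root: cobra merges inherited
                // persistent flags into the subcommand's flag set during parsing.
                ws, err := cmd.Flags().GetString("workspace")
                if err != nil {
                    return err
                }
                fmt.Println("workspace:", ws)
                return nil
            },
        }
        root.AddCommand(status)

        root.SetArgs([]string{"status", "--workspace", "./constellation-workspace"})
        _ = root.Execute()
    }

A subcommand constructed in isolation, as in the tests in this diff, has no parent to inherit from, which is why those tests call cmd.Flags().String("workspace", "", "") themselves.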
@@ -69,7 +69,7 @@ func terminate(cmd *cobra.Command, terminator cloudTerminator, fileHandler file.
 }

 spinner.Start("Terminating", false)
-err = terminator.Terminate(cmd.Context(), flags.logLevel)
+err = terminator.Terminate(cmd.Context(), constants.TerraformWorkingDir, flags.logLevel)
 spinner.Stop()
 if err != nil {
 return fmt.Errorf("terminating Constellation cluster: %w", err)
@@ -79,19 +79,20 @@ func terminate(cmd *cobra.Command, terminator cloudTerminator, fileHandler file.

 var removeErr error
 if err := fileHandler.Remove(constants.AdminConfFilename); err != nil && !errors.Is(err, fs.ErrNotExist) {
-removeErr = errors.Join(err, fmt.Errorf("failed to remove file: '%s', please remove it manually", constants.AdminConfFilename))
+removeErr = errors.Join(err, fmt.Errorf("failed to remove file: '%s', please remove it manually", adminConfPath(flags.workspace)))
 }

-if err := fileHandler.Remove(constants.ClusterIDsFileName); err != nil && !errors.Is(err, fs.ErrNotExist) {
-removeErr = errors.Join(err, fmt.Errorf("failed to remove file: '%s', please remove it manually", constants.ClusterIDsFileName))
+if err := fileHandler.Remove(constants.ClusterIDsFilename); err != nil && !errors.Is(err, fs.ErrNotExist) {
+removeErr = errors.Join(err, fmt.Errorf("failed to remove file: '%s', please remove it manually", clusterIDsPath(flags.workspace)))
 }

 return removeErr
 }

 type terminateFlags struct {
 yes bool
-logLevel terraform.LogLevel
+workspace string
+logLevel terraform.LogLevel
 }

 func parseTerminateFlags(cmd *cobra.Command) (terminateFlags, error) {
@@ -107,9 +108,14 @@ func parseTerminateFlags(cmd *cobra.Command) (terminateFlags, error) {
 if err != nil {
 return terminateFlags{}, fmt.Errorf("parsing Terraform log level %s: %w", logLevelString, err)
 }
+workspace, err := cmd.Flags().GetString("workspace")
+if err != nil {
+return terminateFlags{}, fmt.Errorf("parsing workspace string: %w", err)
+}

 return terminateFlags{
 yes: yes,
-logLevel: logLevel,
+workspace: workspace,
+logLevel: logLevel,
 }, nil
 }
@@ -51,7 +51,7 @@ func TestTerminate(t *testing.T) {
 fs := afero.NewMemMapFs()
 fileHandler := file.NewHandler(fs)
 require.NoError(fileHandler.Write(constants.AdminConfFilename, []byte{1, 2}, file.OptNone))
-require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFileName, idFile, file.OptNone))
+require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptNone))
 return fs
 }
 someErr := errors.New("failed")
@@ -89,7 +89,7 @@ func TestTerminate(t *testing.T) {
 setupFs: func(require *require.Assertions, idFile clusterid.File) afero.Fs {
 fs := afero.NewMemMapFs()
 fileHandler := file.NewHandler(fs)
-require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFileName, idFile, file.OptNone))
+require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptNone))
 return fs
 },
 terminator: &stubCloudTerminator{},
@@ -137,6 +137,7 @@ func TestTerminate(t *testing.T) {

 // register persistent flags manually
 cmd.Flags().String("tf-log", "NONE", "")
+cmd.Flags().String("workspace", "", "")

 require.NotNil(tc.setupFs)
 fileHandler := file.NewHandler(tc.setupFs(require, tc.idFile))
@@ -157,7 +158,7 @@ func TestTerminate(t *testing.T) {
 assert.True(tc.terminator.Called())
 _, err = fileHandler.Stat(constants.AdminConfFilename)
 assert.Error(err)
-_, err = fileHandler.Stat(constants.ClusterIDsFileName)
+_, err = fileHandler.Stat(constants.ClusterIDsFilename)
 assert.Error(err)
 }
 }
@@ -7,7 +7,9 @@ SPDX-License-Identifier: AGPL-3.0-only
 package cmd

 import (
+"context"
 "fmt"
+"io"

 "github.com/edgelesssys/constellation/v2/cli/internal/upgrade"
 "github.com/edgelesssys/constellation/v2/internal/file"
@@ -21,7 +23,7 @@ type tfMigrationClient struct {

 // planMigration checks for Terraform migrations and asks for confirmation if there are any. The user input is returned as confirmedDiff.
 // adapted from migrateTerraform().
-func (u *tfMigrationClient) planMigration(cmd *cobra.Command, file file.Handler, migrateCmd upgrade.TfMigrationCmd) (hasDiff bool, err error) {
+func (u *tfMigrationClient) planMigration(cmd *cobra.Command, file file.Handler, migrateCmd tfMigrationCmd) (hasDiff bool, err error) {
 u.log.Debugf("Planning %s", migrateCmd.String())
 if err := migrateCmd.CheckTerraformMigrations(file); err != nil {
 return false, fmt.Errorf("checking workspace: %w", err)
@@ -35,7 +37,7 @@ func (u *tfMigrationClient) planMigration(cmd *cobra.Command, file file.Handler,

 // applyMigration plans and then applies the Terraform migration. The user is asked for confirmation if there are any changes.
 // adapted from migrateTerraform().
-func (u *tfMigrationClient) applyMigration(cmd *cobra.Command, file file.Handler, migrateCmd upgrade.TfMigrationCmd, yesFlag bool) error {
+func (u *tfMigrationClient) applyMigration(cmd *cobra.Command, upgradeWorkspace string, file file.Handler, migrateCmd tfMigrationCmd, yesFlag bool) error {
 hasDiff, err := u.planMigration(cmd, file, migrateCmd)
 if err != nil {
 return err
@@ -50,7 +52,7 @@ func (u *tfMigrationClient) applyMigration(cmd *cobra.Command, file file.Handler
 }
 if !ok {
 cmd.Println("Aborting upgrade.")
-if err := upgrade.CleanUpTerraformMigrations(migrateCmd.UpgradeID(), file); err != nil {
+if err := upgrade.CleanUpTerraformMigrations(upgradeWorkspace, migrateCmd.UpgradeID(), file); err != nil {
 return fmt.Errorf("cleaning up workspace: %w", err)
 }
 return fmt.Errorf("aborted by user")
@@ -66,3 +68,12 @@ func (u *tfMigrationClient) applyMigration(cmd *cobra.Command, file file.Handler
 }
 return nil
 }
+
+// tfMigrationCmd is an interface for all terraform upgrade / migration commands.
+type tfMigrationCmd interface {
+CheckTerraformMigrations(file file.Handler) error
+Plan(ctx context.Context, file file.Handler, outWriter io.Writer) (bool, error)
+Apply(ctx context.Context, fileHandler file.Handler) error
+String() string
+UpgradeID() string
+}
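Note: the new tfMigrationCmd interface lets tfMigrationClient be exercised without a real Terraform workspace. As an illustration only (a sketch, not code from this commit), a test double satisfying the interface could look like this:

    // stubTfMigrationCmd is a hypothetical test double for the tfMigrationCmd interface.
    type stubTfMigrationCmd struct {
        hasDiff  bool
        applyErr error
    }

    func (s *stubTfMigrationCmd) CheckTerraformMigrations(_ file.Handler) error { return nil }

    func (s *stubTfMigrationCmd) Plan(_ context.Context, _ file.Handler, _ io.Writer) (bool, error) {
        return s.hasDiff, nil
    }

    func (s *stubTfMigrationCmd) Apply(_ context.Context, _ file.Handler) error { return s.applyErr }

    func (s *stubTfMigrationCmd) String() string { return "stub migration" }

    func (s *stubTfMigrationCmd) UpgradeID() string { return "test-upgrade" }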
@@ -61,7 +61,12 @@ func runUpgradeApply(cmd *cobra.Command, _ []string) error {
 }
 defer log.Sync()
 fileHandler := file.NewHandler(afero.NewOsFs())
-upgrader, err := kubernetes.NewUpgrader(cmd.Context(), cmd.OutOrStdout(), fileHandler, log, kubernetes.UpgradeCmdKindApply)
+upgrader, err := kubernetes.NewUpgrader(
+cmd.Context(), cmd.OutOrStdout(),
+constants.UpgradeDir, constants.AdminConfFilename,
+fileHandler, log, kubernetes.UpgradeCmdKindApply,
+)
 if err != nil {
 return err
 }
@@ -86,7 +91,7 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, fileHandler file.Hand
 return fmt.Errorf("parsing flags: %w", err)
 }

-conf, err := config.New(fileHandler, flags.configPath, u.configFetcher, flags.force)
+conf, err := config.New(fileHandler, constants.ConfigFilename, u.configFetcher, flags.force)
 var configValidationErr *config.ValidationError
 if errors.As(err, &configValidationErr) {
 cmd.PrintErrln(configValidationErr.LongMessage())
@@ -113,7 +118,7 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, fileHandler file.Hand
 }

 var idFile clusterid.File
-if err := fileHandler.ReadJSON(constants.ClusterIDsFileName, &idFile); err != nil {
+if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil {
 return fmt.Errorf("reading cluster ID file: %w", err)
 }
 conf.UpdateMAAURL(idFile.AttestationURL)
@@ -123,12 +128,12 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, fileHandler file.Hand
 return fmt.Errorf("upgrading measurements: %w", err)
 }
 // not moving existing Terraform migrator because of planned apply refactor
-if err := u.migrateTerraform(cmd, u.imageFetcher, conf, flags); err != nil {
+if err := u.migrateTerraform(cmd, u.imageFetcher, conf, fileHandler, flags); err != nil {
 return fmt.Errorf("performing Terraform migrations: %w", err)
 }
 // reload idFile after terraform migration
 // it might have been updated by the migration
-if err := fileHandler.ReadJSON(constants.ClusterIDsFileName, &idFile); err != nil {
+if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil {
 return fmt.Errorf("reading updated cluster ID file: %w", err)
 }

@@ -177,10 +182,12 @@ func getImage(ctx context.Context, conf *config.Config, fetcher imageFetcher) (s

 // migrateTerraform checks if the Constellation version the cluster is being upgraded to requires a migration
 // of cloud resources with Terraform. If so, the migration is performed.
-func (u *upgradeApplyCmd) migrateTerraform(cmd *cobra.Command, fetcher imageFetcher, conf *config.Config, flags upgradeApplyFlags) error {
+func (u *upgradeApplyCmd) migrateTerraform(
+cmd *cobra.Command, fetcher imageFetcher, conf *config.Config, fileHandler file.Handler, flags upgradeApplyFlags,
+) error {
 u.log.Debugf("Planning Terraform migrations")

-if err := u.upgrader.CheckTerraformMigrations(); err != nil {
+if err := u.upgrader.CheckTerraformMigrations(constants.UpgradeDir); err != nil {
 return fmt.Errorf("checking workspace: %w", err)
 }

@@ -207,9 +214,11 @@ func (u *upgradeApplyCmd) migrateTerraform(cmd *cobra.Command, fetcher imageFetc
 u.log.Debugf("Using Terraform variables:\n%v", vars)

 opts := upgrade.TerraformUpgradeOptions{
 LogLevel: flags.terraformLogLevel,
 CSP: conf.GetProvider(),
 Vars: vars,
+TFWorkspace: constants.TerraformWorkingDir,
+UpgradeWorkspace: constants.UpgradeDir,
 }

 // Check if there are any Terraform migrations to apply
@@ -228,20 +237,25 @@ func (u *upgradeApplyCmd) migrateTerraform(cmd *cobra.Command, fetcher imageFetc
 }
 if !ok {
 cmd.Println("Aborting upgrade.")
-if err := u.upgrader.CleanUpTerraformMigrations(); err != nil {
+if err := u.upgrader.CleanUpTerraformMigrations(constants.UpgradeDir); err != nil {
 return fmt.Errorf("cleaning up workspace: %w", err)
 }
 return fmt.Errorf("aborted by user")
 }
 }

 u.log.Debugf("Applying Terraform migrations")
-err := u.upgrader.ApplyTerraformMigrations(cmd.Context(), opts)
+newIDFile, err := u.upgrader.ApplyTerraformMigrations(cmd.Context(), opts)
 if err != nil {
 return fmt.Errorf("applying terraform migrations: %w", err)
 }
+if err := mergeClusterIDFile(constants.ClusterIDsFilename, newIDFile, fileHandler); err != nil {
+return fmt.Errorf("merging cluster ID files: %w", err)
+}

 cmd.Printf("Terraform migrations applied successfully and output written to: %s\n"+
 "A backup of the pre-upgrade state has been written to: %s\n",
-constants.ClusterIDsFileName, filepath.Join(constants.UpgradeDir, constants.TerraformUpgradeBackupDir))
+clusterIDsPath(flags.workspace), filepath.Join(opts.UpgradeWorkspace, u.upgrader.GetUpgradeID(), constants.TerraformUpgradeBackupDir))
 } else {
 u.log.Debugf("No Terraform diff detected")
 }
@@ -327,7 +341,7 @@ func (u *upgradeApplyCmd) handleServiceUpgrade(cmd *cobra.Command, conf *config.
 }

 func parseUpgradeApplyFlags(cmd *cobra.Command) (upgradeApplyFlags, error) {
-configPath, err := cmd.Flags().GetString("config")
+workspace, err := cmd.Flags().GetString("workspace")
 if err != nil {
 return upgradeApplyFlags{}, err
 }
@@ -357,7 +371,7 @@ func parseUpgradeApplyFlags(cmd *cobra.Command) (upgradeApplyFlags, error) {
 }

 return upgradeApplyFlags{
-configPath: configPath,
+workspace: workspace,
 yes: yes,
 upgradeTimeout: timeout,
 force: force,
@@ -365,8 +379,21 @@ func parseUpgradeApplyFlags(cmd *cobra.Command) (upgradeApplyFlags, error) {
 }, nil
 }

+func mergeClusterIDFile(clusterIDPath string, newIDFile clusterid.File, fileHandler file.Handler) error {
+idFile := &clusterid.File{}
+if err := fileHandler.ReadJSON(clusterIDPath, idFile); err != nil {
+return fmt.Errorf("reading %s: %w", clusterIDPath, err)
+}
+
+if err := fileHandler.WriteJSON(clusterIDPath, idFile.Merge(newIDFile), file.OptOverwrite); err != nil {
+return fmt.Errorf("writing %s: %w", clusterIDPath, err)
+}
+
+return nil
+}
+
 type upgradeApplyFlags struct {
-configPath string
+workspace string
 yes bool
 upgradeTimeout time.Duration
 force bool
@@ -380,8 +407,9 @@ type cloudUpgrader interface {
 ExtendClusterConfigCertSANs(ctx context.Context, alternativeNames []string) error
 GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, *corev1.ConfigMap, error)
 PlanTerraformMigrations(ctx context.Context, opts upgrade.TerraformUpgradeOptions) (bool, error)
-ApplyTerraformMigrations(ctx context.Context, opts upgrade.TerraformUpgradeOptions) error
-CheckTerraformMigrations() error
-CleanUpTerraformMigrations() error
+ApplyTerraformMigrations(ctx context.Context, opts upgrade.TerraformUpgradeOptions) (clusterid.File, error)
+CheckTerraformMigrations(upgradeWorkspace string) error
+CleanUpTerraformMigrations(upgradeWorkspace string) error
 AddManualStateMigration(migration terraform.StateMigration)
+GetUpgradeID() string
 }
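Note: mergeClusterIDFile folds the ID file returned by the Terraform migration back into the one on disk via clusterid.File.Merge. A hedged, test-style usage sketch with an in-memory filesystem follows (the test name and field values are illustrative; the exact field precedence on conflict is defined by clusterid.File.Merge, which is not shown in this diff):

    func TestMergeClusterIDFileSketch(t *testing.T) {
        fh := file.NewHandler(afero.NewMemMapFs())
        // Seed the on-disk ID file, then merge in values produced by the migration.
        require.NoError(t, fh.WriteJSON(constants.ClusterIDsFilename, clusterid.File{IP: "192.0.2.1"}))

        newIDFile := clusterid.File{AttestationURL: "https://example.test/maa"}
        require.NoError(t, mergeClusterIDFile(constants.ClusterIDsFilename, newIDFile, fh))

        var merged clusterid.File
        require.NoError(t, fh.ReadJSON(constants.ClusterIDsFilename, &merged))
        // Inspect merged here; which fields win on conflict depends on clusterid.File.Merge.
    }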
@@ -129,9 +129,9 @@ func TestUpgradeApply(t *testing.T) {
 require := require.New(t)
 cmd := newUpgradeApplyCmd()
 cmd.SetIn(bytes.NewBufferString(tc.stdin))
-cmd.Flags().String("config", constants.ConfigFilename, "") // register persistent flag manually
+cmd.Flags().String("workspace", "", "") // register persistent flag manually
 cmd.Flags().Bool("force", true, "") // register persistent flag manually
 cmd.Flags().String("tf-log", "DEBUG", "") // register persistent flag manually

 if tc.yesFlag {
 err := cmd.Flags().Set("yes", "true")
@@ -141,7 +141,7 @@ func TestUpgradeApply(t *testing.T) {
 handler := file.NewHandler(afero.NewMemMapFs())
 cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), cloudprovider.Azure)
 require.NoError(handler.WriteYAML(constants.ConfigFilename, cfg))
-require.NoError(handler.WriteJSON(constants.ClusterIDsFileName, clusterid.File{}))
+require.NoError(handler.WriteJSON(constants.ClusterIDsFilename, clusterid.File{}))

 upgrader := upgradeApplyCmd{upgrader: tc.upgrader, log: logger.NewTest(t), imageFetcher: tc.fetcher, configFetcher: stubAttestationFetcher{}}

@@ -186,11 +186,11 @@ func (u stubUpgrader) GetClusterAttestationConfig(_ context.Context, _ variant.V
 return u.currentConfig, &corev1.ConfigMap{}, nil
 }

-func (u stubUpgrader) CheckTerraformMigrations() error {
+func (u stubUpgrader) CheckTerraformMigrations(_ string) error {
 return u.checkTerraformErr
 }

-func (u stubUpgrader) CleanUpTerraformMigrations() error {
+func (u stubUpgrader) CleanUpTerraformMigrations(_ string) error {
 return u.cleanTerraformErr
 }

@@ -198,8 +198,8 @@ func (u stubUpgrader) PlanTerraformMigrations(context.Context, upgrade.Terraform
 return u.terraformDiff, u.planTerraformErr
 }

-func (u stubUpgrader) ApplyTerraformMigrations(context.Context, upgrade.TerraformUpgradeOptions) error {
-return u.applyTerraformErr
+func (u stubUpgrader) ApplyTerraformMigrations(context.Context, upgrade.TerraformUpgradeOptions) (clusterid.File, error) {
+return clusterid.File{}, u.applyTerraformErr
 }

 func (u stubUpgrader) ExtendClusterConfigCertSANs(_ context.Context, _ []string) error {
@@ -65,15 +65,22 @@ func runUpgradeCheck(cmd *cobra.Command, _ []string) error {
 return fmt.Errorf("creating logger: %w", err)
 }
 defer log.Sync()

 flags, err := parseUpgradeCheckFlags(cmd)
 if err != nil {
 return err
 }

 fileHandler := file.NewHandler(afero.NewOsFs())
-checker, err := kubernetes.NewUpgrader(cmd.Context(), cmd.OutOrStdout(), fileHandler, log, kubernetes.UpgradeCmdKindCheck)
+checker, err := kubernetes.NewUpgrader(
+cmd.Context(), cmd.OutOrStdout(),
+constants.UpgradeDir, constants.AdminConfFilename,
+fileHandler, log, kubernetes.UpgradeCmdKindCheck,
+)
 if err != nil {
 return fmt.Errorf("setting up Kubernetes upgrader: %w", err)
 }

 versionfetcher := versionsapi.NewFetcher()
 rekor, err := sigstore.NewRekor()
 if err != nil {
@@ -102,10 +109,6 @@ func runUpgradeCheck(cmd *cobra.Command, _ []string) error {
 }

 func parseUpgradeCheckFlags(cmd *cobra.Command) (upgradeCheckFlags, error) {
-configPath, err := cmd.Flags().GetString("config")
-if err != nil {
-return upgradeCheckFlags{}, fmt.Errorf("parsing config string: %w", err)
-}
 force, err := cmd.Flags().GetBool("force")
 if err != nil {
 return upgradeCheckFlags{}, fmt.Errorf("parsing force bool: %w", err)
@@ -133,7 +136,6 @@ func parseUpgradeCheckFlags(cmd *cobra.Command) (upgradeCheckFlags, error) {
 }

 return upgradeCheckFlags{
-configPath: configPath,
 force: force,
 updateConfig: updateConfig,
 ref: ref,
@@ -152,7 +154,7 @@ type upgradeCheckCmd struct {

 // upgradePlan plans an upgrade of a Constellation cluster.
 func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fileHandler file.Handler, fetcher attestationconfigapi.Fetcher, flags upgradeCheckFlags) error {
-conf, err := config.New(fileHandler, flags.configPath, fetcher, flags.force)
+conf, err := config.New(fileHandler, constants.ConfigFilename, fetcher, flags.force)
 var configValidationErr *config.ValidationError
 if errors.As(err, &configValidationErr) {
 cmd.PrintErrln(configValidationErr.LongMessage())
@@ -160,7 +162,6 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fileHandler file.Hand
 if err != nil {
 return err
 }
-u.log.Debugf("Read configuration from %q", flags.configPath)

 if !u.canUpgradeCheck {
 cmd.PrintErrln("Planning Constellation upgrades automatically is not supported in the OSS build of the Constellation CLI. Consult the documentation for instructions on where to download the enterprise version.")
@@ -205,7 +206,7 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fileHandler file.Hand
 }

 u.log.Debugf("Planning Terraform migrations")
-if err := u.checker.CheckTerraformMigrations(); err != nil {
+if err := u.checker.CheckTerraformMigrations(constants.UpgradeDir); err != nil { // Why is this run twice?????
 return fmt.Errorf("checking workspace: %w", err)
 }

@@ -220,7 +221,7 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fileHandler file.Hand
 u.checker.AddManualStateMigration(migration)
 }

-if err := u.checker.CheckTerraformMigrations(); err != nil {
+if err := u.checker.CheckTerraformMigrations(constants.UpgradeDir); err != nil { // Why is this run twice?????
 return fmt.Errorf("checking workspace: %w", err)
 }

@@ -236,9 +237,11 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fileHandler file.Hand
 u.log.Debugf("Using Terraform variables:\n%v", vars)

 opts := upgrade.TerraformUpgradeOptions{
 LogLevel: flags.terraformLogLevel,
 CSP: conf.GetProvider(),
 Vars: vars,
+TFWorkspace: constants.TerraformWorkingDir,
+UpgradeWorkspace: constants.UpgradeDir,
 }

 cmd.Println("The following Terraform migrations are available with this CLI:")
@@ -249,7 +252,7 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fileHandler file.Hand
 return fmt.Errorf("planning terraform migrations: %w", err)
 }
 defer func() {
-if err := u.checker.CleanUpTerraformMigrations(); err != nil {
+if err := u.checker.CleanUpTerraformMigrations(constants.UpgradeDir); err != nil {
 u.log.Debugf("Failed to clean up Terraform migrations: %v", err)
 }
 }()
@@ -278,7 +281,7 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fileHandler file.Hand
 cmd.Print(updateMsg)

 if flags.updateConfig {
-if err := upgrade.writeConfig(conf, fileHandler, flags.configPath); err != nil {
+if err := upgrade.writeConfig(conf, fileHandler, constants.ConfigFilename); err != nil {
 return fmt.Errorf("writing config: %w", err)
 }
 cmd.Println("Config updated successfully.")
@@ -383,7 +386,7 @@ type currentVersionInfo struct {
 }

 func (v *versionCollector) currentVersions(ctx context.Context) (currentVersionInfo, error) {
-helmClient, err := helm.NewUpgradeClient(kubectl.New(), constants.AdminConfFilename, constants.HelmNamespace, v.log)
+helmClient, err := helm.NewUpgradeClient(kubectl.New(), constants.UpgradeDir, constants.AdminConfFilename, constants.HelmNamespace, v.log)
 if err != nil {
 return currentVersionInfo{}, fmt.Errorf("setting up helm client: %w", err)
 }
@@ -749,7 +752,6 @@ func (v *versionCollector) filterCompatibleCLIVersions(ctx context.Context, cliP
 }

 type upgradeCheckFlags struct {
-configPath string
 force bool
 updateConfig bool
 ref string
@@ -761,8 +763,8 @@ type upgradeChecker interface {
 CurrentImage(ctx context.Context) (string, error)
 CurrentKubernetesVersion(ctx context.Context) (string, error)
 PlanTerraformMigrations(ctx context.Context, opts upgrade.TerraformUpgradeOptions) (bool, error)
-CheckTerraformMigrations() error
-CleanUpTerraformMigrations() error
+CheckTerraformMigrations(upgradeWorkspace string) error
+CleanUpTerraformMigrations(upgradeWorkspace string) error
 AddManualStateMigration(migration terraform.StateMigration)
 }
@@ -215,7 +215,6 @@ func TestUpgradeCheck(t *testing.T) {

 testCases := map[string]struct {
 collector stubVersionCollector
-flags upgradeCheckFlags
 csp cloudprovider.Provider
 checker stubUpgradeChecker
 imagefetcher stubImageFetcher
@@ -226,11 +225,8 @@ func TestUpgradeCheck(t *testing.T) {
 collector: collector,
 checker: stubUpgradeChecker{},
 imagefetcher: stubImageFetcher{},
-flags: upgradeCheckFlags{
-configPath: constants.ConfigFilename,
-},
-csp: cloudprovider.GCP,
-cliVersion: "v1.0.0",
+csp: cloudprovider.GCP,
+cliVersion: "v1.0.0",
 },
 "terraform err": {
 collector: collector,
@@ -238,12 +234,9 @@ func TestUpgradeCheck(t *testing.T) {
 err: assert.AnError,
 },
 imagefetcher: stubImageFetcher{},
-flags: upgradeCheckFlags{
-configPath: constants.ConfigFilename,
-},
-csp: cloudprovider.GCP,
-cliVersion: "v1.0.0",
-wantError: true,
+csp: cloudprovider.GCP,
+cliVersion: "v1.0.0",
+wantError: true,
 },
 }

@@ -253,7 +246,7 @@ func TestUpgradeCheck(t *testing.T) {

 fileHandler := file.NewHandler(afero.NewMemMapFs())
 cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), tc.csp)
-require.NoError(fileHandler.WriteYAML(tc.flags.configPath, cfg))
+require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, cfg))

 checkCmd := upgradeCheckCmd{
 canUpgradeCheck: true,
@@ -265,7 +258,7 @@ func TestUpgradeCheck(t *testing.T) {

 cmd := newUpgradeCheckCmd()

-err := checkCmd.upgradeCheck(cmd, fileHandler, stubAttestationFetcher{}, tc.flags)
+err := checkCmd.upgradeCheck(cmd, fileHandler, stubAttestationFetcher{}, upgradeCheckFlags{})
 if tc.wantError {
 assert.Error(err)
 return
@@ -348,11 +341,11 @@ func (u stubUpgradeChecker) PlanTerraformMigrations(context.Context, upgrade.Ter
 return u.tfDiff, u.err
 }

-func (u stubUpgradeChecker) CheckTerraformMigrations() error {
+func (u stubUpgradeChecker) CheckTerraformMigrations(_ string) error {
 return u.err
 }

-func (u stubUpgradeChecker) CleanUpTerraformMigrations() error {
+func (u stubUpgradeChecker) CleanUpTerraformMigrations(_ string) error {
 return u.err
 }
@@ -43,7 +43,7 @@ func NewVerifyCmd() *cobra.Command {
 Use: "verify",
 Short: "Verify the confidential properties of a Constellation cluster",
 Long: "Verify the confidential properties of a Constellation cluster.\n" +
-"If arguments aren't specified, values are read from `" + constants.ClusterIDsFileName + "`.",
+"If arguments aren't specified, values are read from `" + constants.ClusterIDsFilename + "`.",
 Args: cobra.ExactArgs(0),
 RunE: runVerify,
 }
@@ -85,8 +85,8 @@ func (c *verifyCmd) verify(cmd *cobra.Command, fileHandler file.Handler, verifyC
 }
 c.log.Debugf("Using flags: %+v", flags)

-c.log.Debugf("Loading configuration file from %q", flags.configPath)
-conf, err := config.New(fileHandler, flags.configPath, configFetcher, flags.force)
+c.log.Debugf("Loading configuration file from %q", configPath(flags.workspace))
+conf, err := config.New(fileHandler, constants.ConfigFilename, configFetcher, flags.force)
 var configValidationErr *config.ValidationError
 if errors.As(err, &configValidationErr) {
 cmd.PrintErrln(configValidationErr.LongMessage())
@@ -138,11 +138,11 @@ func (c *verifyCmd) verify(cmd *cobra.Command, fileHandler file.Handler, verifyC
 }

 func (c *verifyCmd) parseVerifyFlags(cmd *cobra.Command, fileHandler file.Handler) (verifyFlags, error) {
-configPath, err := cmd.Flags().GetString("config")
+workspace, err := cmd.Flags().GetString("workspace")
 if err != nil {
 return verifyFlags{}, fmt.Errorf("parsing config path argument: %w", err)
 }
-c.log.Debugf("Flag 'config' set to %q", configPath)
+c.log.Debugf("Flag 'workspace' set to %q", workspace)

 ownerID := ""
 clusterID, err := cmd.Flags().GetString("cluster-id")
@@ -170,7 +170,7 @@ func (c *verifyCmd) parseVerifyFlags(cmd *cobra.Command, fileHandler file.Handle
 c.log.Debugf("Flag 'raw' set to %t", force)

 var idFile clusterid.File
-if err := fileHandler.ReadJSON(constants.ClusterIDsFileName, &idFile); err != nil && !errors.Is(err, afero.ErrFileNotFound) {
+if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil && !errors.Is(err, afero.ErrFileNotFound) {
 return verifyFlags{}, fmt.Errorf("reading cluster ID file: %w", err)
 }

@@ -178,13 +178,13 @@ func (c *verifyCmd) parseVerifyFlags(cmd *cobra.Command, fileHandler file.Handle
 emptyEndpoint := endpoint == ""
 emptyIDs := ownerID == "" && clusterID == ""
 if emptyEndpoint || emptyIDs {
-c.log.Debugf("Trying to supplement empty flag values from %q", constants.ClusterIDsFileName)
+c.log.Debugf("Trying to supplement empty flag values from %q", clusterIDsPath(workspace))
 if emptyEndpoint {
-cmd.Printf("Using endpoint from %q. Specify --node-endpoint to override this.\n", constants.ClusterIDsFileName)
+cmd.Printf("Using endpoint from %q. Specify --node-endpoint to override this.\n", clusterIDsPath(workspace))
 endpoint = idFile.IP
 }
 if emptyIDs {
-cmd.Printf("Using ID from %q. Specify --cluster-id to override this.\n", constants.ClusterIDsFileName)
+cmd.Printf("Using ID from %q. Specify --cluster-id to override this.\n", clusterIDsPath(workspace))
 ownerID = idFile.OwnerID
 clusterID = idFile.ClusterID
 }
@@ -200,24 +200,24 @@ func (c *verifyCmd) parseVerifyFlags(cmd *cobra.Command, fileHandler file.Handle
 }

 return verifyFlags{
 endpoint: endpoint,
-configPath: configPath,
+workspace: workspace,
 ownerID: ownerID,
 clusterID: clusterID,
 maaURL: idFile.AttestationURL,
 rawOutput: raw,
 force: force,
 }, nil
 }

 type verifyFlags struct {
 endpoint string
 ownerID string
 clusterID string
-configPath string
+workspace string
 maaURL string
 rawOutput bool
 force bool
 }

 func addPortIfMissing(endpoint string, defaultPort int) (string, error) {
@@ -42,15 +42,15 @@ func TestVerify(t *testing.T) {
 someErr := errors.New("failed")

 testCases := map[string]struct {
 provider cloudprovider.Provider
 protoClient *stubVerifyClient
 formatter *stubAttDocFormatter
 nodeEndpointFlag string
-configFlag string
 clusterIDFlag string
 idFile *clusterid.File
 wantEndpoint string
+skipConfigCreation bool
 wantErr bool
 }{
 "gcp": {
 provider: cloudprovider.GCP,
@@ -123,12 +123,12 @@ func TestVerify(t *testing.T) {
 formatter: &stubAttDocFormatter{},
 },
 "config file not existing": {
 provider: cloudprovider.GCP,
 clusterIDFlag: zeroBase64,
 nodeEndpointFlag: "192.0.2.1:1234",
-configFlag: "./file",
 formatter: &stubAttDocFormatter{},
+skipConfigCreation: true,
 wantErr: true,
 },
 "error protoClient GetState": {
 provider: cloudprovider.Azure,
@@ -163,14 +163,11 @@ func TestVerify(t *testing.T) {
 require := require.New(t)

 cmd := NewVerifyCmd()
-cmd.Flags().String("config", constants.ConfigFilename, "") // register persistent flag manually
+cmd.Flags().String("workspace", "", "") // register persistent flag manually
 cmd.Flags().Bool("force", true, "") // register persistent flag manually
 out := &bytes.Buffer{}
 cmd.SetOut(out)
 cmd.SetErr(&bytes.Buffer{})
-if tc.configFlag != "" {
-require.NoError(cmd.Flags().Set("config", tc.configFlag))
-}
 if tc.clusterIDFlag != "" {
 require.NoError(cmd.Flags().Set("cluster-id", tc.clusterIDFlag))
 }
@@ -179,10 +176,12 @@ func TestVerify(t *testing.T) {
 }
 fileHandler := file.NewHandler(afero.NewMemMapFs())

-cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), tc.provider)
-require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, cfg))
+if !tc.skipConfigCreation {
+cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), tc.provider)
+require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, cfg))
+}
 if tc.idFile != nil {
-require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFileName, tc.idFile, file.OptNone))
+require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, tc.idFile, file.OptNone))
 }

 v := &verifyCmd{log: logger.NewTest(t)}
cli/internal/cmd/workspace.go (new file, 54 lines)
@@ -0,0 +1,54 @@
+/*
+Copyright (c) Edgeless Systems GmbH
+
+SPDX-License-Identifier: AGPL-3.0-only
+*/
+
+package cmd
+
+import (
+"path/filepath"
+
+"github.com/edgelesssys/constellation/v2/internal/constants"
+)
+
+// Users may override the default workspace using the --workspace flag.
+// The default workspace is the current working directory.
+// The following functions return paths relative to the set workspace,
+// and should be used when printing the path to the user.
+// The MUST not be used when accessing files, as the workspace is changed
+// using os.Chdir() before the command is executed.
+
+func adminConfPath(workspace string) string {
+return filepath.Join(workspace, constants.AdminConfFilename)
+}
+
+func configPath(workspace string) string {
+return filepath.Join(workspace, constants.ConfigFilename)
+}
+
+func clusterIDsPath(workspace string) string {
+return filepath.Join(workspace, constants.ClusterIDsFilename)
+}
+
+func masterSecretPath(workspace string) string {
+return filepath.Join(workspace, constants.MasterSecretFilename)
+}
+
+func terraformClusterWorkspace(workspace string) string {
+return filepath.Join(workspace, constants.TerraformWorkingDir)
+}
+
+func terraformIAMWorkspace(workspace string) string {
+return filepath.Join(workspace, constants.TerraformIAMWorkingDir)
+}
+
+func terraformLogPath(workspace string) string {
+return filepath.Join(workspace, constants.TerraformLogFile)
+}
+
+const gcpServiceAccountKeyFile = "gcpServiceAccountKey.json"
+
+func gcpServiceAccountKeyPath(workspace string) string {
+return filepath.Join(workspace, gcpServiceAccountKeyFile)
+}
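Note: the comment block in workspace.go splits path handling in two: commands keep using bare file names (for example constants.ConfigFilename) because the CLI changes into the workspace before a command runs, while the *Path helpers only produce workspace-relative paths for messages shown to the user. A minimal sketch of that split, with a hypothetical pre-run hook (hook name and error text are illustrative, not taken from this commit):

    // changeToWorkspace is a hypothetical pre-run hook: it switches the process into
    // the workspace so that all later file access can use bare file names.
    func changeToWorkspace(cmd *cobra.Command, _ []string) error {
        workspace, err := cmd.Flags().GetString("workspace")
        if err != nil {
            return err
        }
        if workspace == "" {
            return nil // default workspace: the current working directory
        }
        if err := os.Chdir(workspace); err != nil {
            return fmt.Errorf("changing into workspace %q: %w", workspace, err)
        }
        return nil
    }

When talking to the user, the helpers keep messages relative to the workspace passed on the command line, e.g. cmd.Printf("Config written to %s\n", configPath(workspace)).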
@@ -11,7 +11,6 @@ import (
 "fmt"
 "path/filepath"

-"github.com/edgelesssys/constellation/v2/internal/constants"
 apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 k8serrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/runtime/schema"
@@ -100,7 +99,7 @@ func (c *UpgradeClient) backupCRs(ctx context.Context, crds []apiextensionsv1.Cu
 }

 func (c *UpgradeClient) backupFolder(upgradeID string) string {
-return filepath.Join(constants.UpgradeDir, upgradeID, "backups") + string(filepath.Separator)
+return filepath.Join(c.upgradeWorkspace, upgradeID, "backups") + string(filepath.Separator)
 }

 func (c *UpgradeClient) crdBackupFolder(upgradeID string) string {
@@ -13,32 +13,27 @@ import (
 "github.com/edgelesssys/constellation/v2/internal/constants"
 )

-// Initializer installs all Helm charts required for a constellation cluster.
-type Initializer interface {
-Install(ctx context.Context, releases *Releases) error
-}
-
-type initializationClient struct {
+// InitializationClient installs all Helm charts required for a Constellation cluster.
+type InitializationClient struct {
 log debugLog
 installer installer
 }

 // NewInitializer creates a new client to install all Helm charts required for a constellation cluster.
-func NewInitializer(log debugLog) (Initializer, error) {
-installer, err := NewInstaller(constants.AdminConfFilename, log)
+func NewInitializer(log debugLog, adminConfPath string) (*InitializationClient, error) {
+installer, err := NewInstaller(adminConfPath, log)
 if err != nil {
 return nil, fmt.Errorf("creating Helm installer: %w", err)
 }
-return &initializationClient{log: log, installer: installer}, nil
+return &InitializationClient{log: log, installer: installer}, nil
 }

 // Install installs all Helm charts required for a constellation cluster.
-func (h initializationClient) Install(ctx context.Context, releases *Releases,
-) error {
-if err := h.installer.InstallChart(ctx, releases.Cilium); err != nil {
+func (i InitializationClient) Install(ctx context.Context, releases *Releases) error {
+if err := i.installer.InstallChart(ctx, releases.Cilium); err != nil {
 return fmt.Errorf("installing Cilium: %w", err)
 }
-h.log.Debugf("Waiting for Cilium to become ready")
+i.log.Debugf("Waiting for Cilium to become ready")
 helper, err := newK8sCiliumHelper(constants.AdminConfFilename)
 if err != nil {
 return fmt.Errorf("creating Kubernetes client: %w", err)
@@ -46,43 +41,43 @@ func (h initializationClient) Install(ctx context.Context, releases *Releases,
 timeToStartWaiting := time.Now()
 // TODO(3u13r): Reduce the timeout when we switched the package repository - this is only this high because we once
 // saw polling times of ~16 minutes when hitting a slow PoP from Fastly (GitHub's / ghcr.io CDN).
-if err := helper.WaitForDS(ctx, "kube-system", "cilium", h.log); err != nil {
+if err := helper.WaitForDS(ctx, "kube-system", "cilium", i.log); err != nil {
 return fmt.Errorf("waiting for Cilium to become healthy: %w", err)
 }
 timeUntilFinishedWaiting := time.Since(timeToStartWaiting)
-h.log.Debugf("Cilium became healthy after %s", timeUntilFinishedWaiting.String())
+i.log.Debugf("Cilium became healthy after %s", timeUntilFinishedWaiting.String())

-h.log.Debugf("Fix Cilium through restart")
+i.log.Debugf("Fix Cilium through restart")
 if err := helper.RestartDS("kube-system", "cilium"); err != nil {
 return fmt.Errorf("restarting Cilium: %w", err)
 }

-h.log.Debugf("Installing microservices")
-if err := h.installer.InstallChart(ctx, releases.ConstellationServices); err != nil {
+i.log.Debugf("Installing microservices")
+if err := i.installer.InstallChart(ctx, releases.ConstellationServices); err != nil {
 return fmt.Errorf("installing microservices: %w", err)
 }

-h.log.Debugf("Installing cert-manager")
-if err := h.installer.InstallChart(ctx, releases.CertManager); err != nil {
+i.log.Debugf("Installing cert-manager")
+if err := i.installer.InstallChart(ctx, releases.CertManager); err != nil {
 return fmt.Errorf("installing cert-manager: %w", err)
 }

 if releases.CSI != nil {
-h.log.Debugf("Installing CSI deployments")
-if err := h.installer.InstallChart(ctx, *releases.CSI); err != nil {
+i.log.Debugf("Installing CSI deployments")
+if err := i.installer.InstallChart(ctx, *releases.CSI); err != nil {
 return fmt.Errorf("installing CSI snapshot CRDs: %w", err)
 }
 }

 if releases.AWSLoadBalancerController != nil {
-h.log.Debugf("Installing AWS Load Balancer Controller")
-if err := h.installer.InstallChart(ctx, *releases.AWSLoadBalancerController); err != nil {
+i.log.Debugf("Installing AWS Load Balancer Controller")
+if err := i.installer.InstallChart(ctx, *releases.AWSLoadBalancerController); err != nil {
 return fmt.Errorf("installing AWS Load Balancer Controller: %w", err)
 }
 }

-h.log.Debugf("Installing constellation operators")
-if err := h.installer.InstallChart(ctx, releases.ConstellationOperators); err != nil {
+i.log.Debugf("Installing constellation operators")
+if err := i.installer.InstallChart(ctx, releases.ConstellationOperators); err != nil {
 return fmt.Errorf("installing constellation operators: %w", err)
 }
 return nil
@@ -143,7 +143,7 @@ type installDoer struct {
 func (i installDoer) Do(ctx context.Context) error {
 i.log.Debugf("Trying to install Helm chart %s", i.chart.Name())
 if _, err := i.Installer.RunWithContext(ctx, i.chart, i.values); err != nil {
-i.log.Debugf("Helm chart installation % failed: %v", i.chart.Name(), err)
+i.log.Debugf("Helm chart installation %s failed: %v", i.chart.Name(), err)
 return err
 }
@ -46,17 +46,18 @@ var errReleaseNotFound = errors.New("release not found")
|
|||||||
|
|
||||||
// UpgradeClient handles interaction with helm and the cluster.
|
// UpgradeClient handles interaction with helm and the cluster.
|
||||||
type UpgradeClient struct {
|
type UpgradeClient struct {
|
||||||
config *action.Configuration
|
config *action.Configuration
|
||||||
kubectl crdClient
|
kubectl crdClient
|
||||||
fs file.Handler
|
fs file.Handler
|
||||||
actions actionWrapper
|
actions actionWrapper
|
||||||
log debugLog
|
upgradeWorkspace string
|
||||||
|
log debugLog
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewUpgradeClient returns a new initializes upgrade client for the given namespace.
|
// NewUpgradeClient returns a newly initialized UpgradeClient for the given namespace.
|
||||||
func NewUpgradeClient(client crdClient, kubeConfigPath, helmNamespace string, log debugLog) (*UpgradeClient, error) {
|
func NewUpgradeClient(client crdClient, upgradeWorkspace, kubeConfigPath, helmNamespace string, log debugLog) (*UpgradeClient, error) {
|
||||||
settings := cli.New()
|
settings := cli.New()
|
||||||
settings.KubeConfig = kubeConfigPath // constants.AdminConfFilename
|
settings.KubeConfig = kubeConfigPath
|
||||||
|
|
||||||
actionConfig := &action.Configuration{}
|
actionConfig := &action.Configuration{}
|
||||||
if err := actionConfig.Init(settings.RESTClientGetter(), helmNamespace, "secret", log.Debugf); err != nil {
|
if err := actionConfig.Init(settings.RESTClientGetter(), helmNamespace, "secret", log.Debugf); err != nil {
|
||||||
@ -74,7 +75,13 @@ func NewUpgradeClient(client crdClient, kubeConfigPath, helmNamespace string, lo
|
|||||||
return nil, fmt.Errorf("initializing kubectl: %w", err)
|
return nil, fmt.Errorf("initializing kubectl: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &UpgradeClient{kubectl: client, fs: fileHandler, actions: actions{config: actionConfig}, log: log}, nil
|
return &UpgradeClient{
|
||||||
|
kubectl: client,
|
||||||
|
fs: fileHandler,
|
||||||
|
actions: actions{config: actionConfig},
|
||||||
|
upgradeWorkspace: upgradeWorkspace,
|
||||||
|
log: log,
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *UpgradeClient) shouldUpgrade(releaseName string, newVersion semver.Semver, force bool) error {
|
func (c *UpgradeClient) shouldUpgrade(releaseName string, newVersion semver.Semver, force bool) error {
|
||||||
|
@@ -102,7 +102,10 @@ type Upgrader struct {
}

// NewUpgrader returns a new Upgrader.
- func NewUpgrader(ctx context.Context, outWriter io.Writer, fileHandler file.Handler, log debugLog, upgradeCmdKind UpgradeCmdKind) (*Upgrader, error) {
+ func NewUpgrader(
+ ctx context.Context, outWriter io.Writer, upgradeWorkspace, kubeConfigPath string,
+ fileHandler file.Handler, log debugLog, upgradeCmdKind UpgradeCmdKind,
+ ) (*Upgrader, error) {
upgradeID := "upgrade-" + time.Now().Format("20060102150405") + "-" + strings.Split(uuid.New().String(), "-")[0]
if upgradeCmdKind == UpgradeCmdKindCheck {
// When performing an upgrade check, the upgrade directory will only be used temporarily to store the
@@ -118,7 +121,7 @@ func NewUpgrader(ctx context.Context, outWriter io.Writer, fileHandler file.Hand
upgradeID: upgradeID,
}

- kubeConfig, err := clientcmd.BuildConfigFromFlags("", constants.AdminConfFilename)
+ kubeConfig, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
if err != nil {
return nil, fmt.Errorf("building kubernetes config: %w", err)
}
@@ -136,13 +139,13 @@ func NewUpgrader(ctx context.Context, outWriter io.Writer, fileHandler file.Hand
}
u.dynamicInterface = &NodeVersionClient{client: unstructuredClient}

- helmClient, err := helm.NewUpgradeClient(kubectl.New(), constants.AdminConfFilename, constants.HelmNamespace, log)
+ helmClient, err := helm.NewUpgradeClient(kubectl.New(), upgradeWorkspace, kubeConfigPath, constants.HelmNamespace, log)
if err != nil {
return nil, fmt.Errorf("setting up helm client: %w", err)
}
u.helmClient = helmClient

- tfClient, err := terraform.New(ctx, filepath.Join(constants.UpgradeDir, upgradeID, constants.TerraformUpgradeWorkingDir))
+ tfClient, err := terraform.New(ctx, filepath.Join(upgradeWorkspace, upgradeID, constants.TerraformUpgradeWorkingDir))
if err != nil {
return nil, fmt.Errorf("setting up terraform client: %w", err)
}
@@ -170,14 +173,14 @@ func (u *Upgrader) AddManualStateMigration(migration terraform.StateMigration) {

// CheckTerraformMigrations checks whether Terraform migrations are possible in the current workspace.
// If the files that will be written during the upgrade already exist, it returns an error.
- func (u *Upgrader) CheckTerraformMigrations() error {
+ func (u *Upgrader) CheckTerraformMigrations(upgradeWorkspace string) error {
- return u.tfUpgrader.CheckTerraformMigrations(u.upgradeID, constants.TerraformUpgradeBackupDir)
+ return u.tfUpgrader.CheckTerraformMigrations(upgradeWorkspace, u.upgradeID, constants.TerraformUpgradeBackupDir)
}

// CleanUpTerraformMigrations cleans up the Terraform migration workspace, for example when an upgrade is
// aborted by the user.
- func (u *Upgrader) CleanUpTerraformMigrations() error {
+ func (u *Upgrader) CleanUpTerraformMigrations(upgradeWorkspace string) error {
- return u.tfUpgrader.CleanUpTerraformMigrations(u.upgradeID)
+ return u.tfUpgrader.CleanUpTerraformMigrations(upgradeWorkspace, u.upgradeID)
}

// PlanTerraformMigrations prepares the upgrade workspace and plans the Terraform migrations for the Constellation upgrade.
@@ -191,7 +194,7 @@ func (u *Upgrader) PlanTerraformMigrations(ctx context.Context, opts upgrade.Ter
// If PlanTerraformMigrations has not been executed before, it will return an error.
// In case of a successful upgrade, the output will be written to the specified file and the old Terraform directory is replaced
// By the new one.
- func (u *Upgrader) ApplyTerraformMigrations(ctx context.Context, opts upgrade.TerraformUpgradeOptions) error {
+ func (u *Upgrader) ApplyTerraformMigrations(ctx context.Context, opts upgrade.TerraformUpgradeOptions) (clusterid.File, error) {
return u.tfUpgrader.ApplyTerraformMigrations(ctx, opts, u.upgradeID)
}
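To make the new wiring concrete, here is a minimal, illustrative sketch of how a caller in `cli/internal/cmd` might construct the upgrader now that the upgrade workspace and kubeconfig path are passed in explicitly. The helper name, the `stderrLog` type, the exact path construction, and the `cloudcmd` import path are assumptions; only the `NewUpgrader` and `CheckTerraformMigrations` signatures are taken from the diff above.

```go
// Illustrative sketch only, assuming this lives inside the Constellation CLI module.
package cmd

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd" // assumed import path
	"github.com/edgelesssys/constellation/v2/internal/constants"
	"github.com/edgelesssys/constellation/v2/internal/file"
	"github.com/spf13/afero"
)

// stderrLog is assumed to satisfy the package's unexported debug-log interface.
type stderrLog struct{}

func (stderrLog) Debugf(format string, args ...any) { fmt.Fprintf(os.Stderr, format+"\n", args...) }
func (stderrLog) Sync()                             {}

func runUpgradeCheck(ctx context.Context, workspace string) error {
	fileHandler := file.NewHandler(afero.NewOsFs())
	// The caller now resolves all paths relative to the --workspace flag;
	// the concrete layout below is an assumption for illustration.
	upgradeWorkspace := filepath.Join(workspace, constants.UpgradeDir)
	kubeConfigPath := filepath.Join(workspace, constants.AdminConfFilename)

	u, err := cloudcmd.NewUpgrader(
		ctx, os.Stdout, upgradeWorkspace, kubeConfigPath,
		fileHandler, stderrLog{}, cloudcmd.UpgradeCmdKindCheck,
	)
	if err != nil {
		return fmt.Errorf("creating upgrader: %w", err)
	}
	// Fails if a previous run left files in the upgrade directory.
	return u.CheckTerraformMigrations(upgradeWorkspace)
}
```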
@@ -14,7 +14,6 @@ import (
"testing"

"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
- "github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
@@ -104,25 +103,26 @@ func TestPrepareWorkspace(t *testing.T) {
require := require.New(t)

file := file.NewHandler(afero.NewMemMapFs())
+ testWorkspace := "unittest"

path := path.Join(tc.pathBase, strings.ToLower(tc.provider.String()))
- err := prepareWorkspace(path, file, constants.TerraformWorkingDir)
+ err := prepareWorkspace(path, file, testWorkspace)

require.NoError(err)
- checkFiles(t, file, func(err error) { assert.NoError(err) }, constants.TerraformWorkingDir, tc.fileList)
+ checkFiles(t, file, func(err error) { assert.NoError(err) }, testWorkspace, tc.fileList)

if tc.testAlreadyUnpacked {
// Let's try the same again and check if we don't get a "file already exists" error.
- require.NoError(file.Remove(filepath.Join(constants.TerraformWorkingDir, "variables.tf")))
+ require.NoError(file.Remove(filepath.Join(testWorkspace, "variables.tf")))
- err := prepareWorkspace(path, file, constants.TerraformWorkingDir)
+ err := prepareWorkspace(path, file, testWorkspace)
assert.NoError(err)
- checkFiles(t, file, func(err error) { assert.NoError(err) }, constants.TerraformWorkingDir, tc.fileList)
+ checkFiles(t, file, func(err error) { assert.NoError(err) }, testWorkspace, tc.fileList)
}

- err = cleanUpWorkspace(file, constants.TerraformWorkingDir)
+ err = cleanUpWorkspace(file, testWorkspace)
require.NoError(err)

- checkFiles(t, file, func(err error) { assert.ErrorIs(err, fs.ErrNotExist) }, constants.TerraformWorkingDir, tc.fileList)
+ checkFiles(t, file, func(err error) { assert.ErrorIs(err, fs.ErrNotExist) }, testWorkspace, tc.fileList)
})
}
}
@@ -38,6 +38,9 @@ import (
const (
tfVersion = ">= 1.4.6"
terraformVarsFile = "terraform.tfvars"

+ // terraformUpgradePlanFile is the file name of the zipfile created by Terraform plan for Constellation upgrades.
+ terraformUpgradePlanFile = "plan.zip"
)

// ErrTerraformWorkspaceExistsWithDifferentVariables is returned when existing Terraform files differ from the version the CLI wants to extract.
@@ -438,9 +441,10 @@ func (c *Client) ApplyIAMConfig(ctx context.Context, provider cloudprovider.Prov
return c.ShowIAM(ctx, provider)
}

- // Plan determines the diff that will be applied by Terraform. The plan output is written to the planFile.
+ // Plan determines the diff that will be applied by Terraform.
+ // The plan output is written to the Terraform working directory.
// If there is a diff, the returned bool is true. Otherwise, it is false.
- func (c *Client) Plan(ctx context.Context, logLevel LogLevel, planFile string) (bool, error) {
+ func (c *Client) Plan(ctx context.Context, logLevel LogLevel) (bool, error) {
if err := c.setLogLevel(logLevel); err != nil {
return false, fmt.Errorf("set terraform log level %s: %w", logLevel.String(), err)
}
@@ -454,18 +458,19 @@ func (c *Client) Plan(ctx context.Context, logLevel LogLevel, planFile string) (
}

opts := []tfexec.PlanOption{
- tfexec.Out(planFile),
+ tfexec.Out(terraformUpgradePlanFile),
}
return c.tf.Plan(ctx, opts...)
}

- // ShowPlan formats the diff in planFilePath and writes it to the specified output.
+ // ShowPlan formats the diff of a plan file in the Terraform working directory,
+ // and writes it to the specified output.
- func (c *Client) ShowPlan(ctx context.Context, logLevel LogLevel, planFilePath string, output io.Writer) error {
+ func (c *Client) ShowPlan(ctx context.Context, logLevel LogLevel, output io.Writer) error {
if err := c.setLogLevel(logLevel); err != nil {
return fmt.Errorf("set terraform log level %s: %w", logLevel.String(), err)
}

- planResult, err := c.tf.ShowPlanFileRaw(ctx, planFilePath)
+ planResult, err := c.tf.ShowPlanFileRaw(ctx, terraformUpgradePlanFile)
if err != nil {
return fmt.Errorf("terraform show plan: %w", err)
}
@@ -575,6 +580,9 @@ func (c *Client) setLogLevel(logLevel LogLevel) error {
if err := c.tf.SetLog(logLevel.String()); err != nil {
return fmt.Errorf("set log level %s: %w", logLevel.String(), err)
}

+ // Terraform writes its log to the working directory.
+ // => Set the log path to the parent directory to have it in the user's working directory.
if err := c.tf.SetLogPath(filepath.Join("..", constants.TerraformLogFile)); err != nil {
return fmt.Errorf("set log path: %w", err)
}
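For clarity, a small sketch of the new plan/show-plan call pattern now that the plan file name is an internal constant rather than a parameter. The wrapper function, package placement, and working-directory value are illustrative only; `terraform.New`, `Plan`, `ShowPlan`, and `LogLevelDebug` appear in the diff above.

```go
// Illustrative sketch only, assuming it sits next to the terraform package's existing callers.
package cloudcmd

import (
	"context"
	"fmt"
	"io"

	"github.com/edgelesssys/constellation/v2/cli/internal/terraform" // assumed import path
)

func planUpgrade(ctx context.Context, workingDir string, out io.Writer) (bool, error) {
	tfClient, err := terraform.New(ctx, workingDir)
	if err != nil {
		return false, fmt.Errorf("setting up terraform client: %w", err)
	}
	// The plan file ("plan.zip") now lives in the Terraform working directory
	// and is no longer passed in by the caller.
	hasDiff, err := tfClient.Plan(ctx, terraform.LogLevelDebug)
	if err != nil {
		return false, fmt.Errorf("terraform plan: %w", err)
	}
	if hasDiff {
		if err := tfClient.ShowPlan(ctx, terraform.LogLevelDebug, out); err != nil {
			return false, fmt.Errorf("terraform show plan: %w", err)
		}
	}
	return hasDiff, nil
}
```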
@@ -92,7 +92,7 @@ func TestPrepareCluster(t *testing.T) {
c := &Client{
tf: &stubTerraform{},
file: file.NewHandler(tc.fs),
- workingDir: constants.TerraformWorkingDir,
+ workingDir: "unittest",
}

path := path.Join(tc.pathBase, strings.ToLower(tc.provider.String()))
@@ -445,7 +445,7 @@ func TestCreateCluster(t *testing.T) {
c := &Client{
tf: tc.tf,
file: file.NewHandler(tc.fs),
- workingDir: constants.TerraformWorkingDir,
+ workingDir: "unittest",
}

path := path.Join(tc.pathBase, strings.ToLower(tc.provider.String()))
@@ -835,7 +835,7 @@ func TestCleanupWorkspace(t *testing.T) {
c := &Client{
file: file,
tf: &stubTerraform{},
- workingDir: constants.TerraformWorkingDir,
+ workingDir: "unittest",
}

err := c.CleanUpWorkspace()
@@ -1019,7 +1019,7 @@ func TestPlan(t *testing.T) {
workingDir: tc.pathBase,
}

- _, err := c.Plan(context.Background(), LogLevelDebug, constants.TerraformUpgradePlanFile)
+ _, err := c.Plan(context.Background(), LogLevelDebug)
if tc.wantErr {
require.Error(err)
} else {
@@ -1078,7 +1078,7 @@ func TestShowPlan(t *testing.T) {
workingDir: tc.pathBase,
}

- err := c.ShowPlan(context.Background(), LogLevelDebug, "", bytes.NewBuffer(nil))
+ err := c.ShowPlan(context.Background(), LogLevelDebug, bytes.NewBuffer(nil))
if tc.wantErr {
require.Error(err)
} else {
@@ -19,34 +19,29 @@ import (
"github.com/edgelesssys/constellation/v2/internal/file"
)

- // TfMigrationCmd is an interface for all terraform upgrade / migration commands.
- type TfMigrationCmd interface {
- CheckTerraformMigrations(file file.Handler) error
- Plan(ctx context.Context, file file.Handler, outWriter io.Writer) (bool, error)
- Apply(ctx context.Context, fileHandler file.Handler) error
- String() string
- UpgradeID() string
- }
-
// IAMMigrateCmd is a terraform migration command for IAM. Which is used for the tfMigrationClient.
type IAMMigrateCmd struct {
tf tfIAMClient
upgradeID string
- csp cloudprovider.Provider
- logLevel terraform.LogLevel
+ iamWorkspace string
+ upgradeWorkspace string
+ csp cloudprovider.Provider
+ logLevel terraform.LogLevel
}

// NewIAMMigrateCmd creates a new IAMMigrateCmd.
- func NewIAMMigrateCmd(ctx context.Context, upgradeID string, csp cloudprovider.Provider, logLevel terraform.LogLevel) (*IAMMigrateCmd, error) {
+ func NewIAMMigrateCmd(ctx context.Context, iamWorkspace, upgradeWorkspace, upgradeID string, csp cloudprovider.Provider, logLevel terraform.LogLevel) (*IAMMigrateCmd, error) {
- tfClient, err := terraform.New(ctx, filepath.Join(constants.UpgradeDir, upgradeID, constants.TerraformIAMUpgradeWorkingDir))
+ tfClient, err := terraform.New(ctx, filepath.Join(upgradeWorkspace, upgradeID, constants.TerraformIAMUpgradeWorkingDir))
if err != nil {
return nil, fmt.Errorf("setting up terraform client: %w", err)
}
return &IAMMigrateCmd{
tf: tfClient,
upgradeID: upgradeID,
- csp: csp,
- logLevel: logLevel,
+ iamWorkspace: iamWorkspace,
+ upgradeWorkspace: upgradeWorkspace,
+ csp: csp,
+ logLevel: logLevel,
}, nil
}

@@ -62,7 +57,7 @@ func (c *IAMMigrateCmd) UpgradeID() string {

// CheckTerraformMigrations checks whether Terraform migrations are possible in the current workspace.
func (c *IAMMigrateCmd) CheckTerraformMigrations(file file.Handler) error {
- return checkTerraformMigrations(file, c.upgradeID, constants.TerraformIAMUpgradeBackupDir)
+ return checkTerraformMigrations(file, c.upgradeWorkspace, c.upgradeID, constants.TerraformIAMUpgradeBackupDir)
}

// Plan prepares the upgrade workspace and plans the Terraform migrations for the Constellation upgrade, writing the plan to the outWriter.
@@ -70,20 +65,20 @@ func (c *IAMMigrateCmd) Plan(ctx context.Context, file file.Handler, outWriter i
templateDir := filepath.Join("terraform", "iam", strings.ToLower(c.csp.String()))
if err := terraform.PrepareIAMUpgradeWorkspace(file,
templateDir,
- constants.TerraformIAMWorkingDir,
- filepath.Join(constants.UpgradeDir, c.upgradeID, constants.TerraformIAMUpgradeWorkingDir),
- filepath.Join(constants.UpgradeDir, c.upgradeID, constants.TerraformIAMUpgradeBackupDir),
+ c.iamWorkspace,
+ filepath.Join(c.upgradeWorkspace, c.upgradeID, constants.TerraformIAMUpgradeWorkingDir),
+ filepath.Join(c.upgradeWorkspace, c.upgradeID, constants.TerraformIAMUpgradeBackupDir),
); err != nil {
return false, fmt.Errorf("preparing terraform workspace: %w", err)
}

- hasDiff, err := c.tf.Plan(ctx, c.logLevel, constants.TerraformUpgradePlanFile)
+ hasDiff, err := c.tf.Plan(ctx, c.logLevel)
if err != nil {
return false, fmt.Errorf("terraform plan: %w", err)
}

if hasDiff {
- if err := c.tf.ShowPlan(ctx, c.logLevel, constants.TerraformUpgradePlanFile, outWriter); err != nil {
+ if err := c.tf.ShowPlan(ctx, c.logLevel, outWriter); err != nil {
return false, fmt.Errorf("terraform show plan: %w", err)
}
}
@@ -97,14 +92,17 @@ func (c *IAMMigrateCmd) Apply(ctx context.Context, fileHandler file.Handler) err
return fmt.Errorf("terraform apply: %w", err)
}

- if err := fileHandler.RemoveAll(constants.TerraformIAMWorkingDir); err != nil {
+ if err := fileHandler.RemoveAll(c.iamWorkspace); err != nil {
return fmt.Errorf("removing old terraform directory: %w", err)
}
- if err := fileHandler.CopyDir(filepath.Join(constants.UpgradeDir, c.upgradeID, constants.TerraformIAMUpgradeWorkingDir), constants.TerraformIAMWorkingDir); err != nil {
+ if err := fileHandler.CopyDir(
+ filepath.Join(c.upgradeWorkspace, c.upgradeID, constants.TerraformIAMUpgradeWorkingDir),
+ c.iamWorkspace,
+ ); err != nil {
return fmt.Errorf("replacing old terraform directory with new one: %w", err)
}

- if err := fileHandler.RemoveAll(filepath.Join(constants.UpgradeDir, c.upgradeID, constants.TerraformIAMUpgradeWorkingDir)); err != nil {
+ if err := fileHandler.RemoveAll(filepath.Join(c.upgradeWorkspace, c.upgradeID, constants.TerraformIAMUpgradeWorkingDir)); err != nil {
return fmt.Errorf("removing terraform upgrade directory: %w", err)
}
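As a usage illustration, a sketch of driving the IAM migration with the two explicit workspace arguments. The package import paths and the wrapper function are assumptions; the `NewIAMMigrateCmd`, `Plan`, and `Apply` signatures, as well as the constants used, are taken from the diff above and the test that follows.

```go
// Illustrative sketch only, assuming the IAM migration types live in the upgrade package.
package cmd

import (
	"context"
	"fmt"
	"os"

	"github.com/edgelesssys/constellation/v2/cli/internal/terraform" // assumed import path
	"github.com/edgelesssys/constellation/v2/cli/internal/upgrade"   // assumed import path
	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
	"github.com/edgelesssys/constellation/v2/internal/constants"
	"github.com/edgelesssys/constellation/v2/internal/file"
	"github.com/spf13/afero"
)

func migrateIAM(ctx context.Context, upgradeID string) error {
	fh := file.NewHandler(afero.NewOsFs())
	migrator, err := upgrade.NewIAMMigrateCmd(ctx,
		constants.TerraformIAMWorkingDir, // iamWorkspace: the live IAM Terraform directory
		constants.UpgradeDir,             // upgradeWorkspace: where upgrade copies and backups go
		upgradeID, cloudprovider.AWS, terraform.LogLevelDebug,
	)
	if err != nil {
		return fmt.Errorf("creating IAM migration: %w", err)
	}
	hasDiff, err := migrator.Plan(ctx, fh, os.Stdout)
	if err != nil {
		return fmt.Errorf("planning IAM migration: %w", err)
	}
	if !hasDiff {
		return nil // nothing to migrate
	}
	return migrator.Apply(ctx, fh)
}
```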
@@ -28,7 +28,14 @@ func TestIAMMigrate(t *testing.T) {
fs, file := setupMemFSAndFileHandler(t, []string{"terraform.tfvars", "terraform.tfstate"}, []byte("OLD"))
// act
fakeTfClient := &tfClientStub{upgradeID, file}
- sut := &IAMMigrateCmd{fakeTfClient, upgradeID, cloudprovider.AWS, terraform.LogLevelDebug}
+ sut := &IAMMigrateCmd{
+ tf: fakeTfClient,
+ upgradeID: upgradeID,
+ csp: cloudprovider.AWS,
+ logLevel: terraform.LogLevelDebug,
+ iamWorkspace: constants.TerraformIAMWorkingDir,
+ upgradeWorkspace: constants.UpgradeDir,
+ }
hasDiff, err := sut.Plan(context.Background(), file, bytes.NewBuffer(nil))
// assert
assert.NoError(t, err)
@@ -82,11 +89,11 @@ type tfClientStub struct {
file file.Handler
}

- func (t *tfClientStub) Plan(_ context.Context, _ terraform.LogLevel, _ string) (bool, error) {
+ func (t *tfClientStub) Plan(_ context.Context, _ terraform.LogLevel) (bool, error) {
return false, nil
}

- func (t *tfClientStub) ShowPlan(_ context.Context, _ terraform.LogLevel, _ string, _ io.Writer) error {
+ func (t *tfClientStub) ShowPlan(_ context.Context, _ terraform.LogLevel, _ io.Writer) error {
return nil
}
@@ -47,14 +47,16 @@ type TerraformUpgradeOptions struct {
// CSP is the cloud provider to perform the upgrade on.
CSP cloudprovider.Provider
// Vars are the Terraform variables used for the upgrade.
Vars terraform.Variables
+ TFWorkspace string
+ UpgradeWorkspace string
}

// CheckTerraformMigrations checks whether Terraform migrations are possible in the current workspace.
- func checkTerraformMigrations(file file.Handler, upgradeID, upgradeSubDir string) error {
+ func checkTerraformMigrations(file file.Handler, upgradeWorkspace, upgradeID, upgradeSubDir string) error {
var existingFiles []string
filesToCheck := []string{
- filepath.Join(constants.UpgradeDir, upgradeID, upgradeSubDir),
+ filepath.Join(upgradeWorkspace, upgradeID, upgradeSubDir),
}

for _, f := range filesToCheck {
@@ -71,8 +73,8 @@ func checkTerraformMigrations(file file.Handler, upgradeID, upgradeSubDir string

// CheckTerraformMigrations checks whether Terraform migrations are possible in the current workspace.
// If the files that will be written during the upgrade already exist, it returns an error.
- func (u *TerraformUpgrader) CheckTerraformMigrations(upgradeID, upgradeSubDir string) error {
+ func (u *TerraformUpgrader) CheckTerraformMigrations(upgradeWorkspace, upgradeID, upgradeSubDir string) error {
- return checkTerraformMigrations(u.fileHandler, upgradeID, upgradeSubDir)
+ return checkTerraformMigrations(u.fileHandler, upgradeWorkspace, upgradeID, upgradeSubDir)
}

// checkFileExists checks whether a file exists and adds it to the existingFiles slice if it does.
@@ -96,30 +98,22 @@ func (u *TerraformUpgrader) PlanTerraformMigrations(ctx context.Context, opts Te
// Prepare the new Terraform workspace and backup the old one
err := u.tf.PrepareUpgradeWorkspace(
filepath.Join("terraform", strings.ToLower(opts.CSP.String())),
- constants.TerraformWorkingDir,
- filepath.Join(constants.UpgradeDir, upgradeID, constants.TerraformUpgradeWorkingDir),
- filepath.Join(constants.UpgradeDir, upgradeID, constants.TerraformUpgradeBackupDir),
+ opts.TFWorkspace,
+ filepath.Join(opts.UpgradeWorkspace, upgradeID, constants.TerraformUpgradeWorkingDir),
+ filepath.Join(opts.UpgradeWorkspace, upgradeID, constants.TerraformUpgradeBackupDir),
opts.Vars,
)
if err != nil {
return false, fmt.Errorf("preparing terraform workspace: %w", err)
}

- // Backup the old constellation-id.json file
- if err := u.fileHandler.CopyFile(
- constants.ClusterIDsFileName,
- filepath.Join(constants.UpgradeDir, upgradeID, constants.ClusterIDsFileName+".old"),
- ); err != nil {
- return false, fmt.Errorf("backing up %s: %w", constants.ClusterIDsFileName, err)
- }
-
- hasDiff, err := u.tf.Plan(ctx, opts.LogLevel, constants.TerraformUpgradePlanFile)
+ hasDiff, err := u.tf.Plan(ctx, opts.LogLevel)
if err != nil {
return false, fmt.Errorf("terraform plan: %w", err)
}

if hasDiff {
- if err := u.tf.ShowPlan(ctx, opts.LogLevel, constants.TerraformUpgradePlanFile, u.outWriter); err != nil {
+ if err := u.tf.ShowPlan(ctx, opts.LogLevel, u.outWriter); err != nil {
return false, fmt.Errorf("terraform show plan: %w", err)
}
}
@@ -129,22 +123,16 @@ func (u *TerraformUpgrader) PlanTerraformMigrations(ctx context.Context, opts Te

// CleanUpTerraformMigrations cleans up the Terraform migration workspace, for example when an upgrade is
// aborted by the user.
- func (u *TerraformUpgrader) CleanUpTerraformMigrations(upgradeID string) error {
+ func (u *TerraformUpgrader) CleanUpTerraformMigrations(upgradeWorkspace, upgradeID string) error {
- return CleanUpTerraformMigrations(upgradeID, u.fileHandler)
+ return CleanUpTerraformMigrations(upgradeWorkspace, upgradeID, u.fileHandler)
}

// CleanUpTerraformMigrations cleans up the Terraform upgrade directory.
- func CleanUpTerraformMigrations(upgradeID string, fileHandler file.Handler) error {
+ func CleanUpTerraformMigrations(upgradeWorkspace, upgradeID string, fileHandler file.Handler) error {
- cleanupFiles := []string{
- filepath.Join(constants.UpgradeDir, upgradeID),
+ upgradeDir := filepath.Join(upgradeWorkspace, upgradeID)
+ if err := fileHandler.RemoveAll(upgradeDir); err != nil {
+ return fmt.Errorf("cleaning up file %s: %w", upgradeDir, err)
}

- for _, f := range cleanupFiles {
- if err := fileHandler.RemoveAll(f); err != nil {
- return fmt.Errorf("cleaning up file %s: %w", f, err)
- }
- }
-
return nil
}

@@ -152,68 +140,54 @@ func CleanUpTerraformMigrations(upgradeID string, fileHandler file.Handler) erro
// If PlanTerraformMigrations has not been executed before, it will return an error.
// In case of a successful upgrade, the output will be written to the specified file and the old Terraform directory is replaced
// By the new one.
- func (u *TerraformUpgrader) ApplyTerraformMigrations(ctx context.Context, opts TerraformUpgradeOptions, upgradeID string) error {
+ func (u *TerraformUpgrader) ApplyTerraformMigrations(ctx context.Context, opts TerraformUpgradeOptions, upgradeID string) (clusterid.File, error) {
tfOutput, err := u.tf.CreateCluster(ctx, opts.CSP, opts.LogLevel)
if err != nil {
- return fmt.Errorf("terraform apply: %w", err)
+ return clusterid.File{}, fmt.Errorf("terraform apply: %w", err)
}

- outputFileContents := clusterid.File{
+ clusterID := clusterid.File{
CloudProvider: opts.CSP,
InitSecret: []byte(tfOutput.Secret),
IP: tfOutput.IP,
APIServerCertSANs: tfOutput.APIServerCertSANs,
UID: tfOutput.UID,
}
- // AttestationURL is only set for Azure.
+ // Patch MAA policy if we applied an Azure upgrade.
if tfOutput.Azure != nil {
if err := u.policyPatcher.Patch(ctx, tfOutput.Azure.AttestationURL); err != nil {
- return fmt.Errorf("patching policies: %w", err)
+ return clusterid.File{}, fmt.Errorf("patching policies: %w", err)
}
- outputFileContents.AttestationURL = tfOutput.Azure.AttestationURL
+ clusterID.AttestationURL = tfOutput.Azure.AttestationURL
}

- if err := u.fileHandler.RemoveAll(constants.TerraformWorkingDir); err != nil {
+ if err := u.fileHandler.RemoveAll(opts.TFWorkspace); err != nil {
- return fmt.Errorf("removing old terraform directory: %w", err)
+ return clusterid.File{}, fmt.Errorf("removing old terraform directory: %w", err)
}

- if err := u.fileHandler.CopyDir(filepath.Join(constants.UpgradeDir, upgradeID, constants.TerraformUpgradeWorkingDir), constants.TerraformWorkingDir); err != nil {
- return fmt.Errorf("replacing old terraform directory with new one: %w", err)
+ if err := u.fileHandler.CopyDir(
+ filepath.Join(opts.UpgradeWorkspace, upgradeID, constants.TerraformUpgradeWorkingDir),
+ opts.TFWorkspace,
+ ); err != nil {
+ return clusterid.File{}, fmt.Errorf("replacing old terraform directory with new one: %w", err)
}

- if err := u.fileHandler.RemoveAll(filepath.Join(constants.UpgradeDir, upgradeID, constants.TerraformUpgradeWorkingDir)); err != nil {
+ if err := u.fileHandler.RemoveAll(filepath.Join(opts.UpgradeWorkspace, upgradeID, constants.TerraformUpgradeWorkingDir)); err != nil {
- return fmt.Errorf("removing terraform upgrade directory: %w", err)
+ return clusterid.File{}, fmt.Errorf("removing terraform upgrade directory: %w", err)
}

- if err := u.mergeClusterIDFile(outputFileContents); err != nil {
- return fmt.Errorf("merging migration output into %s: %w", constants.ClusterIDsFileName, err)
- }
-
- return nil
- }
-
- // mergeClusterIDFile merges the output of the migration into the constellation-id.json file.
- func (u *TerraformUpgrader) mergeClusterIDFile(migrationOutput clusterid.File) error {
- idFile := &clusterid.File{}
- if err := u.fileHandler.ReadJSON(constants.ClusterIDsFileName, idFile); err != nil {
- return fmt.Errorf("reading %s: %w", constants.ClusterIDsFileName, err)
- }
-
- if err := u.fileHandler.WriteJSON(constants.ClusterIDsFileName, idFile.Merge(migrationOutput), file.OptOverwrite); err != nil {
- return fmt.Errorf("writing %s: %w", constants.ClusterIDsFileName, err)
- }
-
- return nil
+ return clusterID, nil
}

type tfClientCommon interface {
- ShowPlan(ctx context.Context, logLevel terraform.LogLevel, planFilePath string, output io.Writer) error
+ ShowPlan(ctx context.Context, logLevel terraform.LogLevel, output io.Writer) error
- Plan(ctx context.Context, logLevel terraform.LogLevel, planFile string) (bool, error)
+ Plan(ctx context.Context, logLevel terraform.LogLevel) (bool, error)
}

// tfResourceClient is a Terraform client for managing cluster resources.
type tfResourceClient interface {
- PrepareUpgradeWorkspace(path, oldWorkingDir, newWorkingDir, backupDir string, vars terraform.Variables) error
+ PrepareUpgradeWorkspace(embeddedPath, oldWorkingDir, newWorkingDir, backupDir string, vars terraform.Variables) error
CreateCluster(ctx context.Context, provider cloudprovider.Provider, logLevel terraform.LogLevel) (terraform.ApplyOutput, error)
tfClientCommon
}
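To show how the reworked options are meant to be filled in, a short sketch of a caller applying the migrations and consuming the `clusterid.File` that is now returned instead of being merged into constellation-id.json by the upgrader itself. Import paths and the choice of QEMU variables are assumptions; the option fields and the `ApplyTerraformMigrations` signature come from the diff above.

```go
// Illustrative sketch only; the TerraformUpgrader value is assumed to be constructed elsewhere.
package cmd

import (
	"context"
	"fmt"

	"github.com/edgelesssys/constellation/v2/cli/internal/clusterid" // assumed import path
	"github.com/edgelesssys/constellation/v2/cli/internal/terraform" // assumed import path
	"github.com/edgelesssys/constellation/v2/cli/internal/upgrade"   // assumed import path
	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
	"github.com/edgelesssys/constellation/v2/internal/constants"
)

func applyMigrations(ctx context.Context, u *upgrade.TerraformUpgrader, upgradeID string) (clusterid.File, error) {
	opts := upgrade.TerraformUpgradeOptions{
		LogLevel: terraform.LogLevelDebug,
		CSP:      cloudprovider.QEMU,
		Vars:     &terraform.QEMUVariables{},
		// New in this change: both directories are supplied by the caller
		// instead of being hard-coded to constants inside the upgrader.
		TFWorkspace:      constants.TerraformWorkingDir,
		UpgradeWorkspace: constants.UpgradeDir,
	}
	idFile, err := u.ApplyTerraformMigrations(ctx, opts, upgradeID)
	if err != nil {
		return clusterid.File{}, fmt.Errorf("applying Terraform migrations: %w", err)
	}
	// Merging/persisting the returned ID file is now the caller's responsibility.
	return idFile, nil
}
```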
@@ -58,7 +58,7 @@ func TestCheckTerraformMigrations(t *testing.T) {
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
u := upgrader(tc.workspace)
- err := u.CheckTerraformMigrations(tc.upgradeID, constants.TerraformUpgradeBackupDir)
+ err := u.CheckTerraformMigrations(constants.UpgradeDir, tc.upgradeID, constants.TerraformUpgradeBackupDir)
if tc.wantErr {
require.Error(t, err)
return
@@ -95,14 +95,14 @@ func TestPlanTerraformMigrations(t *testing.T) {
"success no diff": {
upgradeID: "1234",
tf: &stubTerraformClient{},
- workspace: workspace([]string{constants.ClusterIDsFileName}),
+ workspace: workspace([]string{}),
},
"success diff": {
upgradeID: "1234",
tf: &stubTerraformClient{
hasDiff: true,
},
- workspace: workspace([]string{constants.ClusterIDsFileName}),
+ workspace: workspace([]string{}),
want: true,
},
"prepare workspace error": {
@@ -110,26 +110,14 @@ func TestPlanTerraformMigrations(t *testing.T) {
tf: &stubTerraformClient{
prepareWorkspaceErr: assert.AnError,
},
- workspace: workspace([]string{constants.ClusterIDsFileName}),
+ workspace: workspace([]string{}),
- wantErr: true,
- },
- "constellation-id.json does not exist": {
- upgradeID: "1234",
- tf: &stubTerraformClient{},
- workspace: workspace(nil),
- wantErr: true,
- },
- "constellation-id backup already exists": {
- upgradeID: "1234",
- tf: &stubTerraformClient{},
- workspace: workspace([]string{filepath.Join(constants.UpgradeDir, "1234", constants.ClusterIDsFileName+".old")}),
wantErr: true,
},
"plan error": {
tf: &stubTerraformClient{
planErr: assert.AnError,
},
- workspace: workspace([]string{constants.ClusterIDsFileName}),
+ workspace: workspace([]string{}),
wantErr: true,
},
"show plan error no diff": {
@@ -137,7 +125,7 @@ func TestPlanTerraformMigrations(t *testing.T) {
tf: &stubTerraformClient{
showErr: assert.AnError,
},
- workspace: workspace([]string{constants.ClusterIDsFileName}),
+ workspace: workspace([]string{}),
},
"show plan error diff": {
upgradeID: "1234",
@@ -145,7 +133,7 @@ func TestPlanTerraformMigrations(t *testing.T) {
showErr: assert.AnError,
hasDiff: true,
},
- workspace: workspace([]string{constants.ClusterIDsFileName}),
+ workspace: workspace([]string{}),
wantErr: true,
},
}
@@ -196,12 +184,11 @@ func TestApplyTerraformMigrations(t *testing.T) {
}

testCases := map[string]struct {
upgradeID string
tf tfResourceClient
policyPatcher stubPolicyPatcher
fs file.Handler
- skipIDFileCreation bool // if true, do not create the constellation-id.json file
wantErr bool
}{
"success": {
upgradeID: "1234",
@@ -218,14 +205,6 @@ func TestApplyTerraformMigrations(t *testing.T) {
policyPatcher: stubPolicyPatcher{},
wantErr: true,
},
- "constellation-id.json does not exist": {
- upgradeID: "1234",
- tf: &stubTerraformClient{},
- fs: fileHandler("1234"),
- policyPatcher: stubPolicyPatcher{},
- skipIDFileCreation: true,
- wantErr: true,
- },
}

for name, tc := range testCases {
@@ -234,21 +213,15 @@ func TestApplyTerraformMigrations(t *testing.T) {

u := upgrader(tc.tf, tc.fs)

- if !tc.skipIDFileCreation {
- require.NoError(
- tc.fs.Write(
- filepath.Join(constants.ClusterIDsFileName),
- []byte("{}"),
- ))
- }
-
opts := TerraformUpgradeOptions{
LogLevel: terraform.LogLevelDebug,
CSP: cloudprovider.Unknown,
Vars: &terraform.QEMUVariables{},
+ TFWorkspace: "test",
+ UpgradeWorkspace: constants.UpgradeDir,
}

- err := u.ApplyTerraformMigrations(context.Background(), opts, tc.upgradeID)
+ _, err := u.ApplyTerraformMigrations(context.Background(), opts, tc.upgradeID)
if tc.wantErr {
require.Error(err)
} else {
@@ -328,7 +301,7 @@ func TestCleanUpTerraformMigrations(t *testing.T) {
workspace := workspace(tc.workspaceFiles)
u := upgrader(workspace)

- err := u.CleanUpTerraformMigrations(tc.upgradeID)
+ err := u.CleanUpTerraformMigrations(constants.UpgradeDir, tc.upgradeID)
if tc.wantErr {
require.Error(err)
return
@@ -359,19 +332,19 @@ type stubTerraformClient struct {
CreateClusterErr error
}

- func (u *stubTerraformClient) PrepareUpgradeWorkspace(string, string, string, string, terraform.Variables) error {
+ func (u *stubTerraformClient) PrepareUpgradeWorkspace(_, _, _, _ string, _ terraform.Variables) error {
return u.prepareWorkspaceErr
}

- func (u *stubTerraformClient) ShowPlan(context.Context, terraform.LogLevel, string, io.Writer) error {
+ func (u *stubTerraformClient) ShowPlan(_ context.Context, _ terraform.LogLevel, _ io.Writer) error {
return u.showErr
}

- func (u *stubTerraformClient) Plan(context.Context, terraform.LogLevel, string) (bool, error) {
+ func (u *stubTerraformClient) Plan(_ context.Context, _ terraform.LogLevel) (bool, error) {
return u.hasDiff, u.planErr
}

- func (u *stubTerraformClient) CreateCluster(context.Context, cloudprovider.Provider, terraform.LogLevel) (terraform.ApplyOutput, error) {
+ func (u *stubTerraformClient) CreateCluster(_ context.Context, _ cloudprovider.Provider, _ terraform.LogLevel) (terraform.ApplyOutput, error) {
return terraform.ApplyOutput{}, u.CreateClusterErr
}

@@ -379,6 +352,6 @@ type stubPolicyPatcher struct {
patchErr error
}

- func (p *stubPolicyPatcher) PatchPolicy(context.Context, string) error {
+ func (p *stubPolicyPatcher) PatchPolicy(_ context.Context, _ string) error {
return p.patchErr
}
@@ -113,7 +113,7 @@ func deploy(cmd *cobra.Command, fileHandler file.Handler, constellationConfig *c
}
if len(ips) == 0 {
var idFile clusterIDsFile
- if err := fileHandler.ReadJSON(constants.ClusterIDsFileName, &idFile); err != nil {
+ if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil {
return fmt.Errorf("reading cluster IDs file: %w", err)
}
ips = []string{idFile.IP}
@ -56,10 +56,10 @@ Work with the Constellation configuration file.
|
|||||||
### Options inherited from parent commands
|
### Options inherited from parent commands
|
||||||
|
|
||||||
```
|
```
|
||||||
--config string path to the configuration file (default "constellation-conf.yaml")
|
--debug enable debug logging
|
||||||
--debug enable debug logging
|
--force disable version compatibility checks - might result in corrupted clusters
|
||||||
--force disable version compatibility checks - might result in corrupted clusters
|
--tf-log string Terraform log level (default "NONE")
|
||||||
--tf-log string Terraform log level (default "NONE")
|
-C, --workspace string path to the Constellation workspace
|
||||||
```
|
```
|
||||||
|
|
||||||
## constellation config generate
|
## constellation config generate
|
||||||
@ -78,7 +78,6 @@ constellation config generate {aws|azure|gcp|openstack|qemu|stackit} [flags]
|
|||||||
|
|
||||||
```
|
```
|
||||||
-a, --attestation string attestation variant to use {aws-sev-snp|aws-nitro-tpm|azure-sev-snp|azure-trustedlaunch|gcp-sev-es|qemu-vtpm}. If not specified, the default for the cloud provider is used
|
-a, --attestation string attestation variant to use {aws-sev-snp|aws-nitro-tpm|azure-sev-snp|azure-trustedlaunch|gcp-sev-es|qemu-vtpm}. If not specified, the default for the cloud provider is used
|
||||||
-f, --file string path to output file, or '-' for stdout (default "constellation-conf.yaml")
|
|
||||||
-h, --help help for generate
|
-h, --help help for generate
|
||||||
-k, --kubernetes string Kubernetes version to use in format MAJOR.MINOR (default "v1.26")
|
-k, --kubernetes string Kubernetes version to use in format MAJOR.MINOR (default "v1.26")
|
||||||
```
|
```
|
||||||
@ -86,10 +85,10 @@ constellation config generate {aws|azure|gcp|openstack|qemu|stackit} [flags]
|
|||||||
### Options inherited from parent commands
|
### Options inherited from parent commands
|
||||||
|
|
||||||
```
|
```
|
||||||
--config string path to the configuration file (default "constellation-conf.yaml")
|
--debug enable debug logging
|
||||||
--debug enable debug logging
|
--force disable version compatibility checks - might result in corrupted clusters
|
||||||
--force disable version compatibility checks - might result in corrupted clusters
|
--tf-log string Terraform log level (default "NONE")
|
||||||
--tf-log string Terraform log level (default "NONE")
|
-C, --workspace string path to the Constellation workspace
|
||||||
```
|
```
|
||||||
|
|
||||||
## constellation config fetch-measurements
|
## constellation config fetch-measurements
|
||||||
@ -117,10 +116,10 @@ constellation config fetch-measurements [flags]
|
|||||||
### Options inherited from parent commands
|
### Options inherited from parent commands
|
||||||
|
|
||||||
```
|
```
|
||||||
--config string path to the configuration file (default "constellation-conf.yaml")
|
--debug enable debug logging
|
||||||
--debug enable debug logging
|
--force disable version compatibility checks - might result in corrupted clusters
|
||||||
--force disable version compatibility checks - might result in corrupted clusters
|
--tf-log string Terraform log level (default "NONE")
|
||||||
--tf-log string Terraform log level (default "NONE")
|
-C, --workspace string path to the Constellation workspace
|
||||||
```
|
```
|
||||||
|
|
||||||
## constellation config instance-types
|
## constellation config instance-types
|
||||||
@ -144,10 +143,10 @@ constellation config instance-types [flags]
|
|||||||
### Options inherited from parent commands
|
### Options inherited from parent commands
|
||||||
|
|
||||||
```
|
```
|
||||||
--config string path to the configuration file (default "constellation-conf.yaml")
|
--debug enable debug logging
|
||||||
--debug enable debug logging
|
--force disable version compatibility checks - might result in corrupted clusters
|
||||||
--force disable version compatibility checks - might result in corrupted clusters
|
--tf-log string Terraform log level (default "NONE")
|
||||||
--tf-log string Terraform log level (default "NONE")
|
-C, --workspace string path to the Constellation workspace
|
||||||
```
|
```
|
||||||
|
|
||||||
## constellation config kubernetes-versions
|
## constellation config kubernetes-versions
|
||||||
@ -171,10 +170,10 @@ constellation config kubernetes-versions [flags]
|
|||||||
### Options inherited from parent commands
|
### Options inherited from parent commands
|
||||||
|
|
||||||
```
|
```
|
||||||
--config string path to the configuration file (default "constellation-conf.yaml")
|
--debug enable debug logging
|
||||||
--debug enable debug logging
|
--force disable version compatibility checks - might result in corrupted clusters
|
||||||
--force disable version compatibility checks - might result in corrupted clusters
|
--tf-log string Terraform log level (default "NONE")
|
||||||
--tf-log string Terraform log level (default "NONE")
|
-C, --workspace string path to the Constellation workspace
|
||||||
```
|
```
|
||||||
|
|
||||||
## constellation config migrate
|
## constellation config migrate
|
||||||
@ -198,10 +197,10 @@ constellation config migrate [flags]
|
|||||||
### Options inherited from parent commands
|
### Options inherited from parent commands
|
||||||
|
|
||||||
```
|
```
|
||||||
--config string path to the configuration file (default "constellation-conf.yaml")
|
--debug enable debug logging
|
||||||
--debug enable debug logging
|
--force disable version compatibility checks - might result in corrupted clusters
|
||||||
--force disable version compatibility checks - might result in corrupted clusters
|
--tf-log string Terraform log level (default "NONE")
|
||||||
--tf-log string Terraform log level (default "NONE")
|
-C, --workspace string path to the Constellation workspace
|
||||||
```
|
```
|
||||||
|
|
||||||
## constellation create
|
## constellation create
|
||||||
@ -226,10 +225,10 @@ constellation create [flags]
|
|||||||
### Options inherited from parent commands
|
### Options inherited from parent commands
|
||||||
|
|
||||||
```
|
```
|
||||||
--config string path to the configuration file (default "constellation-conf.yaml")
|
--debug enable debug logging
|
||||||
--debug enable debug logging
|
--force disable version compatibility checks - might result in corrupted clusters
|
||||||
--force disable version compatibility checks - might result in corrupted clusters
|
--tf-log string Terraform log level (default "NONE")
|
||||||
--tf-log string Terraform log level (default "NONE")
|
-C, --workspace string path to the Constellation workspace
|
||||||
```
|
```
|
||||||
|
|
||||||
## constellation init
|
## constellation init
|
||||||
@ -249,20 +248,19 @@ constellation init [flags]
|
|||||||
### Options
|
### Options
|
||||||
|
|
||||||
```
|
```
|
||||||
--conformance enable conformance mode
|
--conformance enable conformance mode
|
||||||
-h, --help help for init
|
-h, --help help for init
|
||||||
--master-secret string path to base64-encoded master secret
|
--merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config
|
||||||
--merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config
|
--skip-helm-wait install helm charts without waiting for deployments to be ready
|
||||||
--skip-helm-wait install helm charts without waiting for deployments to be ready
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Options inherited from parent commands
|
### Options inherited from parent commands
|
||||||
|
|
||||||
```
|
```
|
||||||
--config string path to the configuration file (default "constellation-conf.yaml")
|
--debug enable debug logging
|
||||||
--debug enable debug logging
|
--force disable version compatibility checks - might result in corrupted clusters
|
||||||
--force disable version compatibility checks - might result in corrupted clusters
|
--tf-log string Terraform log level (default "NONE")
|
||||||
--tf-log string Terraform log level (default "NONE")
|
-C, --workspace string path to the Constellation workspace
|
||||||
```
|
```
|
||||||
|
|
||||||
## constellation mini
|
## constellation mini
|
||||||
@@ -282,10 +280,10 @@ Manage MiniConstellation clusters.
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
 ## constellation mini up
@@ -305,7 +303,6 @@ constellation mini up [flags]
 ### Options
 
 ```
-      --config string      path to the configuration file to use for the cluster
   -h, --help               help for up
       --merge-kubeconfig   merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config (default true)
 ```
@@ -313,9 +310,10 @@ constellation mini up [flags]
 ### Options inherited from parent commands
 
 ```
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
 ## constellation mini down
@@ -340,10 +338,10 @@ constellation mini down [flags]
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
 ## constellation status
@@ -369,10 +367,10 @@ constellation status [flags]
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
 ## constellation verify
@@ -400,10 +398,10 @@ constellation verify [flags]
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
 ## constellation upgrade
@@ -423,10 +421,10 @@ Find and apply upgrades to your Constellation cluster.
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
 ## constellation upgrade check
@@ -453,10 +451,10 @@ constellation upgrade check [flags]
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
 ## constellation upgrade apply
@@ -483,10 +481,10 @@ constellation upgrade apply [flags]
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
 ## constellation recover
@@ -506,18 +504,17 @@ constellation recover [flags]
 ### Options
 
 ```
   -e, --endpoint string   endpoint of the instance, passed as HOST[:PORT]
   -h, --help              help for recover
-      --master-secret string   path to master secret file (default "constellation-mastersecret.json")
 ```
 
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
 ## constellation terminate
@@ -544,10 +541,10 @@ constellation terminate [flags]
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
 ## constellation iam
@@ -567,10 +564,10 @@ Work with the IAM configuration on your cloud provider.
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
 ## constellation iam create
@@ -592,10 +589,10 @@ Create IAM configuration on a cloud platform for your Constellation cluster.
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
 ## constellation iam create aws
@@ -622,12 +619,12 @@ constellation iam create aws [flags]
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
       --update-config      update the config file with the specific IAM information
+  -C, --workspace string   path to the Constellation workspace
   -y, --yes                create the IAM configuration without further confirmation
 ```
 
 ## constellation iam create azure
@@ -654,12 +651,12 @@ constellation iam create azure [flags]
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
       --update-config      update the config file with the specific IAM information
+  -C, --workspace string   path to the Constellation workspace
   -y, --yes                create the IAM configuration without further confirmation
 ```
 
 ## constellation iam create gcp
@@ -689,12 +686,12 @@ constellation iam create gcp [flags]
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
       --update-config      update the config file with the specific IAM information
+  -C, --workspace string   path to the Constellation workspace
   -y, --yes                create the IAM configuration without further confirmation
 ```
 
 ## constellation iam destroy
@@ -719,10 +716,10 @@ constellation iam destroy [flags]
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
 ## constellation iam upgrade
@@ -742,10 +739,10 @@ Find and apply upgrades to your IAM profile.
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
 ## constellation iam upgrade apply
@@ -765,16 +762,15 @@ constellation iam upgrade apply [flags]
 ```
   -h, --help   help for apply
   -y, --yes    run upgrades without further confirmation
 
 ```
 
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
 ## constellation version
@@ -798,9 +794,9 @@ constellation version [flags]
 ### Options inherited from parent commands
 
 ```
-      --config string      path to the configuration file (default "constellation-conf.yaml")
       --debug              enable debug logging
       --force              disable version compatibility checks - might result in corrupted clusters
       --tf-log string      Terraform log level (default "NONE")
+  -C, --workspace string   path to the Constellation workspace
 ```
 
@@ -80,7 +80,7 @@ func TestUpgrade(t *testing.T) {
 
 	// Migrate config if necessary.
 	log.Println("Migrating config if needed.")
-	cmd := exec.CommandContext(context.Background(), cli, "config", "migrate", "--config", constants.ConfigFilename, "--debug")
+	cmd := exec.CommandContext(context.Background(), cli, "config", "migrate", "--debug")
 	stdout, stderr, err := runCommandWithSeparateOutputs(cmd)
 	require.NoError(err, "Stdout: %s\nStderr: %s", string(stdout), string(stderr))
 	log.Println(string(stdout))
@@ -74,8 +74,8 @@ const (
 	// Filenames.
 	//
 
-	// ClusterIDsFileName filename that contains Constellation clusterID and IP.
-	ClusterIDsFileName = "constellation-id.json"
+	// ClusterIDsFilename filename that contains Constellation clusterID and IP.
+	ClusterIDsFilename = "constellation-id.json"
 	// ConfigFilename filename of Constellation config file.
 	ConfigFilename = "constellation-conf.yaml"
 	// LicenseFilename filename of Constellation license file.
@@ -88,8 +88,6 @@ const (
 	TerraformWorkingDir = "constellation-terraform"
 	// TerraformIAMWorkingDir is the directory name for the Terraform IAM Client workspace.
 	TerraformIAMWorkingDir = "constellation-iam-terraform"
-	// GCPServiceAccountKeyFile is the file name for the GCP service account key file.
-	GCPServiceAccountKeyFile = "gcpServiceAccountKey.json"
 	// ErrorLog file which contains server errors during init.
 	ErrorLog = "constellation-cluster.log"
 	// ControlPlaneAdminConfFilename filepath to control plane kubernetes admin config.
@@ -157,8 +155,6 @@ const (
 	MiniConstellationUID = "mini"
 	// TerraformLogFile is the file name of the Terraform log file.
 	TerraformLogFile = "terraform.log"
-	// TerraformUpgradePlanFile is the file name of the zipfile created by Terraform plan for Constellation upgrades.
-	TerraformUpgradePlanFile = "plan.zip"
 	// TerraformUpgradeWorkingDir is the directory name for the Terraform workspace being used in an upgrade.
 	TerraformUpgradeWorkingDir = "terraform"
 	// TerraformIAMUpgradeWorkingDir is the directory name for the Terraform IAM workspace being used in an upgrade.
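The renamed `ClusterIDsFilename` constant is still just a filename; callers combine it with whatever workspace directory is in effect. A minimal, self-contained sketch of such a consumer follows; the `idFile` struct and its JSON field names are assumptions, since the diff only states that the file contains the cluster ID and IP:

```go
// Sketch of a consumer of the renamed constant; not the repo's actual API.
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// clusterIDsFilename mirrors ClusterIDsFilename from the hunk above.
const clusterIDsFilename = "constellation-id.json"

// idFile is an assumed shape for the file's contents.
type idFile struct {
	ClusterID string `json:"clusterID"`
	IP        string `json:"ip"`
}

// readIDFile loads the cluster ID file from a given workspace directory.
func readIDFile(workspace string) (idFile, error) {
	var f idFile
	raw, err := os.ReadFile(filepath.Join(workspace, clusterIDsFilename))
	if err != nil {
		return f, err
	}
	if err := json.Unmarshal(raw, &f); err != nil {
		return f, err
	}
	return f, nil
}

func main() {
	f, err := readIDFile(".")
	if err != nil {
		fmt.Println("no cluster ID file in workspace:", err)
		return
	}
	fmt.Printf("cluster %s at %s\n", f.ClusterID, f.IP)
}
```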