cli: use state file on init and upgrade (#2395)

* [wip] use state file in CLI

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

tidy

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* use state file in CLI

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

take clusterConfig from IDFile for compat

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

various fixes

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

wip

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* add GCP-specific values in Helm loader test

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* remove unnecessary pointer

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* write ClusterValues in one step

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* move stub to test file

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* remove mention of id-file

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* move output to `migrateTerraform`

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* unconditional assignments converting from idFile

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* move require block in go modules file

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* fall back to id file on upgrade

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* tidy

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* fix linter check

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* add notice to remove Terraform state check on manual migration

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* add `name` field

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

fix name tests

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* return early if no Terraform diff

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* tidy

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* return infrastructure state even if no diff exists

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* add TODO to remove comment

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* use state-file in miniconstellation

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* cli: remove id-file (#2402)

* remove id-file from `constellation create`

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* add file renaming to handler

* rename id-file after upgrade

* use idFile on `constellation init`

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* remove id-file from `constellation verify`

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* linter fixes

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* remove id-file from `constellation mini`

* remove id-file from `constellation recover`

* linter fixes

* remove id-file from `constellation terminate`

* fix initSecret type

* fix recover argument precedence

* fix terminate test

* generate

* add TODO to remove id-file removal

* Update cli/internal/cmd/init.go

Co-authored-by: Adrian Stobbe <stobbe.adrian@gmail.com>

* fix verify arg parse logic

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* add version test

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* remove id-file from docs

* add file not found log

* use state-file in miniconstellation

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* remove id-file from `constellation iam destroy`

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* remove id-file from `cdbg deploy`

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

---------

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>
Co-authored-by: Adrian Stobbe <stobbe.adrian@gmail.com>

* use state-file in CI

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* update orchestration docs

---------

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>
Co-authored-by: Adrian Stobbe <stobbe.adrian@gmail.com>
This commit is contained in:
Moritz Sanft 2023-10-09 13:04:29 +02:00 committed by GitHub
parent dbf40d185c
commit 005e865a13
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
51 changed files with 1189 additions and 497 deletions

View File

@ -181,7 +181,7 @@ runs:
CSP: ${{ inputs.cloudProvider }} CSP: ${{ inputs.cloudProvider }}
run: | run: |
echo "::group::Download boot logs" echo "::group::Download boot logs"
CONSTELL_UID=$(yq '.uid' constellation-id.json) CONSTELL_UID=$(yq '.infrastructure.uid' constellation-state.yaml)
case $CSP in case $CSP in
azure) azure)
AZURE_RESOURCE_GROUP=$(yq eval ".provider.azure.resourceGroup" constellation-conf.yaml) AZURE_RESOURCE_GROUP=$(yq eval ".provider.azure.resourceGroup" constellation-conf.yaml)

View File

@ -39,14 +39,14 @@ runs:
- name: Constellation verify - name: Constellation verify
shell: bash shell: bash
run: constellation verify --cluster-id $(jq -r ".clusterID" constellation-id.json) --force run: constellation verify --cluster-id $(jq -r ".clusterValues.clusterID" constellation-state.yaml) --force
- name: Verify all nodes - name: Verify all nodes
shell: bash shell: bash
env: env:
KUBECONFIG: ${{ inputs.kubeconfig }} KUBECONFIG: ${{ inputs.kubeconfig }}
run: | run: |
clusterID=$(jq -r ".clusterID" constellation-id.json) clusterID=$(jq -r ".clusterValues.clusterID" constellation-state.yaml)
nodes=$(kubectl get nodes -o json | jq -r ".items[].metadata.name") nodes=$(kubectl get nodes -o json | jq -r ".items[].metadata.name")
for node in $nodes ; do for node in $nodes ; do

View File

@ -15,6 +15,14 @@ def go_dependencies():
sum = "h1:tdpHgTbmbvEIARu+bixzmleMi14+3imnpoFXz+Qzjp4=", sum = "h1:tdpHgTbmbvEIARu+bixzmleMi14+3imnpoFXz+Qzjp4=",
version = "v1.31.0-20230802163732-1c33ebd9ecfa.1", version = "v1.31.0-20230802163732-1c33ebd9ecfa.1",
) )
go_repository(
name = "cat_dario_mergo",
build_file_generation = "on",
build_file_proto_mode = "disable_global",
importpath = "dario.cat/mergo",
sum = "h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=",
version = "v1.0.0",
)
go_repository( go_repository(
name = "cc_mvdan_editorconfig", name = "cc_mvdan_editorconfig",

View File

@ -48,7 +48,7 @@ type stubTerraformClient struct {
func (c *stubTerraformClient) ApplyCluster(_ context.Context, _ cloudprovider.Provider, _ terraform.LogLevel) (state.Infrastructure, error) { func (c *stubTerraformClient) ApplyCluster(_ context.Context, _ cloudprovider.Provider, _ terraform.LogLevel) (state.Infrastructure, error) {
return state.Infrastructure{ return state.Infrastructure{
ClusterEndpoint: c.ip, ClusterEndpoint: c.ip,
InitSecret: c.initSecret, InitSecret: []byte(c.initSecret),
UID: c.uid, UID: c.uid,
Azure: &state.Azure{ Azure: &state.Azure{
AttestationURL: c.attestationURL, AttestationURL: c.attestationURL,

View File

@ -151,6 +151,7 @@ go_test(
"//internal/cloud/gcpshared", "//internal/cloud/gcpshared",
"//internal/config", "//internal/config",
"//internal/constants", "//internal/constants",
"//internal/crypto",
"//internal/crypto/testvector", "//internal/crypto/testvector",
"//internal/file", "//internal/file",
"//internal/grpc/atlscredentials", "//internal/grpc/atlscredentials",

View File

@ -12,14 +12,12 @@ import (
"io/fs" "io/fs"
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd" "github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix" "github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
"github.com/edgelesssys/constellation/v2/cli/internal/state" "github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/cli/internal/terraform" "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
"github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/attestation/variant" "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config" "github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file" "github.com/edgelesssys/constellation/v2/internal/file"
@ -172,35 +170,15 @@ func (c *createCmd) create(cmd *cobra.Command, creator cloudCreator, fileHandler
} }
c.log.Debugf("Successfully created the cloud resources for the cluster") c.log.Debugf("Successfully created the cloud resources for the cluster")
idFile := convertToIDFile(infraState, provider) state := state.New().SetInfrastructure(infraState)
if err := fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptNone); err != nil { if err := state.WriteToFile(fileHandler, constants.StateFilename); err != nil {
return err return fmt.Errorf("writing state file: %w", err)
}
state := state.NewState(infraState)
if err := fileHandler.WriteYAML(constants.StateFilename, state, file.OptNone); err != nil {
return err
} }
cmd.Println("Your Constellation cluster was created successfully.") cmd.Println("Your Constellation cluster was created successfully.")
return nil return nil
} }
func convertToIDFile(infra state.Infrastructure, provider cloudprovider.Provider) clusterid.File {
var file clusterid.File
file.CloudProvider = provider
file.IP = infra.ClusterEndpoint
file.APIServerCertSANs = infra.APIServerCertSANs
file.InitSecret = []byte(infra.InitSecret) // Convert string to []byte
file.UID = infra.UID
if infra.Azure != nil {
file.AttestationURL = infra.Azure.AttestationURL
}
return file
}
// parseCreateFlags parses the flags of the create command. // parseCreateFlags parses the flags of the create command.
func (c *createCmd) parseCreateFlags(cmd *cobra.Command) (createFlags, error) { func (c *createCmd) parseCreateFlags(cmd *cobra.Command) (createFlags, error) {
yes, err := cmd.Flags().GetBool("yes") yes, err := cmd.Flags().GetBool("yes")
@ -256,9 +234,9 @@ func (c *createCmd) checkDirClean(fileHandler file.Handler) error {
if _, err := fileHandler.Stat(constants.MasterSecretFilename); !errors.Is(err, fs.ErrNotExist) { if _, err := fileHandler.Stat(constants.MasterSecretFilename); !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("file '%s' already exists in working directory. Constellation won't overwrite previous master secrets. Move it somewhere or delete it before creating a new cluster", c.pf.PrefixPrintablePath(constants.MasterSecretFilename)) return fmt.Errorf("file '%s' already exists in working directory. Constellation won't overwrite previous master secrets. Move it somewhere or delete it before creating a new cluster", c.pf.PrefixPrintablePath(constants.MasterSecretFilename))
} }
c.log.Debugf("Checking cluster IDs file") c.log.Debugf("Checking state file")
if _, err := fileHandler.Stat(constants.ClusterIDsFilename); !errors.Is(err, fs.ErrNotExist) { if _, err := fileHandler.Stat(constants.StateFilename); !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("file '%s' already exists in working directory. Constellation won't overwrite previous cluster IDs. Move it somewhere or delete it before creating a new cluster", c.pf.PrefixPrintablePath(constants.ClusterIDsFilename)) return fmt.Errorf("file '%s' already exists in working directory. Constellation won't overwrite previous cluster state. Move it somewhere or delete it before creating a new cluster", c.pf.PrefixPrintablePath(constants.StateFilename))
} }
return nil return nil

View File

@ -11,7 +11,6 @@ import (
"errors" "errors"
"testing" "testing"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/state" "github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config" "github.com/edgelesssys/constellation/v2/internal/config"
@ -154,22 +153,16 @@ func TestCreate(t *testing.T) {
assert.False(tc.creator.createCalled) assert.False(tc.creator.createCalled)
} else { } else {
assert.True(tc.creator.createCalled) assert.True(tc.creator.createCalled)
var gotIDFile clusterid.File
require.NoError(fileHandler.ReadJSON(constants.ClusterIDsFilename, &gotIDFile))
assert.Equal(gotIDFile, clusterid.File{
IP: infraState.ClusterEndpoint,
CloudProvider: tc.provider,
})
var gotState state.State var gotState state.State
expectedState := state.Infrastructure{ expectedState := state.Infrastructure{
ClusterEndpoint: "192.0.2.1", ClusterEndpoint: "192.0.2.1",
APIServerCertSANs: []string{}, APIServerCertSANs: []string{},
InitSecret: []byte{},
} }
require.NoError(fileHandler.ReadYAML(constants.StateFilename, &gotState)) require.NoError(fileHandler.ReadYAML(constants.StateFilename, &gotState))
assert.Equal("v1", gotState.Version) assert.Equal("v1", gotState.Version)
assert.Equal(expectedState, gotState.Infrastructure) assert.Equal(expectedState, gotState.Infrastructure)
} }
} }
}) })

View File

@ -67,10 +67,10 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr
if !errors.Is(err, os.ErrNotExist) { if !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", c.pf.PrefixPrintablePath(constants.AdminConfFilename)) return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", c.pf.PrefixPrintablePath(constants.AdminConfFilename))
} }
c.log.Debugf("Checking if %q exists", c.pf.PrefixPrintablePath(constants.ClusterIDsFilename)) c.log.Debugf("Checking if %q exists", c.pf.PrefixPrintablePath(constants.StateFilename))
_, err = fsHandler.Stat(constants.ClusterIDsFilename) _, err = fsHandler.Stat(constants.StateFilename)
if !errors.Is(err, os.ErrNotExist) { if !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", c.pf.PrefixPrintablePath(constants.ClusterIDsFilename)) return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", c.pf.PrefixPrintablePath(constants.StateFilename))
} }
gcpFileExists := false gcpFileExists := false

View File

@ -36,9 +36,9 @@ func TestIAMDestroy(t *testing.T) {
require.NoError(fh.Write(constants.AdminConfFilename, []byte(""))) require.NoError(fh.Write(constants.AdminConfFilename, []byte("")))
return fh return fh
} }
newFsWithClusterIDFile := func() file.Handler { newFsWithStateFile := func() file.Handler {
fh := file.NewHandler(afero.NewMemMapFs()) fh := file.NewHandler(afero.NewMemMapFs())
require.NoError(fh.Write(constants.ClusterIDsFilename, []byte(""))) require.NoError(fh.Write(constants.StateFilename, []byte("")))
return fh return fh
} }
@ -56,8 +56,8 @@ func TestIAMDestroy(t *testing.T) {
yesFlag: "false", yesFlag: "false",
wantErr: true, wantErr: true,
}, },
"cluster running cluster ids": { "cluster running cluster state": {
fh: newFsWithClusterIDFile(), fh: newFsWithStateFile(),
iamDestroyer: &stubIAMDestroyer{}, iamDestroyer: &stubIAMDestroyer{},
yesFlag: "false", yesFlag: "false",
wantErr: true, wantErr: true,

View File

@ -36,13 +36,10 @@ import (
"github.com/edgelesssys/constellation/v2/bootstrapper/initproto" "github.com/edgelesssys/constellation/v2/bootstrapper/initproto"
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd" "github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix" "github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
"github.com/edgelesssys/constellation/v2/cli/internal/helm" "github.com/edgelesssys/constellation/v2/cli/internal/helm"
"github.com/edgelesssys/constellation/v2/cli/internal/kubecmd" "github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
"github.com/edgelesssys/constellation/v2/cli/internal/state" "github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/cli/internal/terraform"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config" "github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/crypto" "github.com/edgelesssys/constellation/v2/internal/crypto"
@ -73,24 +70,19 @@ func NewInitCmd() *cobra.Command {
} }
type initCmd struct { type initCmd struct {
log debugLog log debugLog
merger configMerger merger configMerger
spinner spinnerInterf spinner spinnerInterf
fileHandler file.Handler fileHandler file.Handler
clusterShower infrastructureShower pf pathprefix.PathPrefixer
pf pathprefix.PathPrefixer
} }
func newInitCmd( func newInitCmd(fileHandler file.Handler, spinner spinnerInterf, merger configMerger, log debugLog) *initCmd {
clusterShower infrastructureShower, fileHandler file.Handler,
spinner spinnerInterf, merger configMerger, log debugLog,
) *initCmd {
return &initCmd{ return &initCmd{
log: log, log: log,
merger: merger, merger: merger,
spinner: spinner, spinner: spinner,
fileHandler: fileHandler, fileHandler: fileHandler,
clusterShower: clusterShower,
} }
} }
@ -116,12 +108,7 @@ func runInitialize(cmd *cobra.Command, _ []string) error {
defer cancel() defer cancel()
cmd.SetContext(ctx) cmd.SetContext(ctx)
tfClient, err := terraform.New(ctx, constants.TerraformWorkingDir) i := newInitCmd(fileHandler, spinner, &kubeconfigMerger{log: log}, log)
if err != nil {
return fmt.Errorf("creating Terraform client: %w", err)
}
i := newInitCmd(tfClient, fileHandler, spinner, &kubeconfigMerger{log: log}, log)
fetcher := attestationconfigapi.NewFetcher() fetcher := attestationconfigapi.NewFetcher()
newAttestationApplier := func(w io.Writer, kubeConfig string, log debugLog) (attestationConfigApplier, error) { newAttestationApplier := func(w io.Writer, kubeConfig string, log debugLog) (attestationConfigApplier, error) {
return kubecmd.New(w, kubeConfig, fileHandler, log) return kubecmd.New(w, kubeConfig, fileHandler, log)
@ -168,10 +155,9 @@ func (i *initCmd) initialize(
cmd.PrintErrln("WARNING: Attestation temporarily relies on AWS nitroTPM. See https://docs.edgeless.systems/constellation/workflows/config#choosing-a-vm-type for more information.") cmd.PrintErrln("WARNING: Attestation temporarily relies on AWS nitroTPM. See https://docs.edgeless.systems/constellation/workflows/config#choosing-a-vm-type for more information.")
} }
i.log.Debugf("Checking cluster ID file") stateFile, err := state.ReadFromFile(i.fileHandler, constants.StateFilename)
var idFile clusterid.File if err != nil {
if err := i.fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil { return fmt.Errorf("reading state file: %w", err)
return fmt.Errorf("reading cluster ID file: %w", err)
} }
i.log.Debugf("Validated k8s version as %s", k8sVersion) i.log.Debugf("Validated k8s version as %s", k8sVersion)
@ -187,7 +173,10 @@ func (i *initCmd) initialize(
} }
i.log.Debugf("Checked license") i.log.Debugf("Checked license")
conf.UpdateMAAURL(idFile.AttestationURL) if stateFile.Infrastructure.Azure != nil {
conf.UpdateMAAURL(stateFile.Infrastructure.Azure.AttestationURL)
}
i.log.Debugf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()) i.log.Debugf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())
validator, err := cloudcmd.NewValidator(cmd, conf.GetAttestationConfig(), i.log) validator, err := cloudcmd.NewValidator(cmd, conf.GetAttestationConfig(), i.log)
if err != nil { if err != nil {
@ -205,15 +194,14 @@ func (i *initCmd) initialize(
if err != nil { if err != nil {
return fmt.Errorf("generating master secret: %w", err) return fmt.Errorf("generating master secret: %w", err)
} }
i.log.Debugf("Generated measurement salt")
i.log.Debugf("Generating measurement salt")
measurementSalt, err := crypto.GenerateRandomBytes(crypto.RNGLengthDefault) measurementSalt, err := crypto.GenerateRandomBytes(crypto.RNGLengthDefault)
if err != nil { if err != nil {
return fmt.Errorf("generating measurement salt: %w", err) return fmt.Errorf("generating measurement salt: %w", err)
} }
idFile.MeasurementSalt = measurementSalt
clusterName := clusterid.GetClusterName(conf, idFile) i.log.Debugf("Setting cluster name to %s", stateFile.Infrastructure.Name)
i.log.Debugf("Setting cluster name to %s", clusterName)
cmd.PrintErrln("Note: If you just created the cluster, it can take a few minutes to connect.") cmd.PrintErrln("Note: If you just created the cluster, it can take a few minutes to connect.")
i.spinner.Start("Connecting ", false) i.spinner.Start("Connecting ", false)
@ -224,12 +212,12 @@ func (i *initCmd) initialize(
KubernetesVersion: versions.VersionConfigs[k8sVersion].ClusterVersion, KubernetesVersion: versions.VersionConfigs[k8sVersion].ClusterVersion,
KubernetesComponents: versions.VersionConfigs[k8sVersion].KubernetesComponents.ToInitProto(), KubernetesComponents: versions.VersionConfigs[k8sVersion].KubernetesComponents.ToInitProto(),
ConformanceMode: flags.conformance, ConformanceMode: flags.conformance,
InitSecret: idFile.InitSecret, InitSecret: stateFile.Infrastructure.InitSecret,
ClusterName: clusterName, ClusterName: stateFile.Infrastructure.Name,
ApiserverCertSans: idFile.APIServerCertSANs, ApiserverCertSans: stateFile.Infrastructure.APIServerCertSANs,
} }
i.log.Debugf("Sending initialization request") i.log.Debugf("Sending initialization request")
resp, err := i.initCall(cmd.Context(), newDialer(validator), idFile.IP, req) resp, err := i.initCall(cmd.Context(), newDialer(validator), stateFile.Infrastructure.ClusterEndpoint, req)
i.spinner.Stop() i.spinner.Stop()
if err != nil { if err != nil {
@ -247,11 +235,8 @@ func (i *initCmd) initialize(
} }
i.log.Debugf("Initialization request succeeded") i.log.Debugf("Initialization request succeeded")
i.log.Debugf("Writing Constellation ID file")
idFile.CloudProvider = provider
bufferedOutput := &bytes.Buffer{} bufferedOutput := &bytes.Buffer{}
if err := i.writeOutput(idFile, resp, flags.mergeConfigs, bufferedOutput); err != nil { if err := i.writeOutput(stateFile, resp, flags.mergeConfigs, bufferedOutput, measurementSalt); err != nil {
return err return err
} }
@ -263,11 +248,6 @@ func (i *initCmd) initialize(
return fmt.Errorf("applying attestation config: %w", err) return fmt.Errorf("applying attestation config: %w", err)
} }
infraState, err := i.clusterShower.ShowInfrastructure(cmd.Context(), conf.GetProvider())
if err != nil {
return fmt.Errorf("getting infrastructure state: %w", err)
}
i.spinner.Start("Installing Kubernetes components ", false) i.spinner.Start("Installing Kubernetes components ", false)
options := helm.Options{ options := helm.Options{
Force: flags.force, Force: flags.force,
@ -279,8 +259,7 @@ func (i *initCmd) initialize(
if err != nil { if err != nil {
return fmt.Errorf("creating Helm client: %w", err) return fmt.Errorf("creating Helm client: %w", err)
} }
executor, includesUpgrades, err := helmApplier.PrepareApply(conf, idFile, options, infraState, executor, includesUpgrades, err := helmApplier.PrepareApply(conf, stateFile, options, serviceAccURI, masterSecret)
serviceAccURI, masterSecret)
if err != nil { if err != nil {
return fmt.Errorf("getting Helm chart executor: %w", err) return fmt.Errorf("getting Helm chart executor: %w", err)
} }
@ -457,23 +436,32 @@ func (d *initDoer) handleGRPCStateChanges(ctx context.Context, wg *sync.WaitGrou
}) })
} }
// writeOutput writes the output of a cluster initialization to the
// state- / id- / kubeconfig-file and saves it to disk.
func (i *initCmd) writeOutput( func (i *initCmd) writeOutput(
idFile clusterid.File, initResp *initproto.InitSuccessResponse, mergeConfig bool, wr io.Writer, stateFile *state.State,
initResp *initproto.InitSuccessResponse,
mergeConfig bool, wr io.Writer,
measurementSalt []byte,
) error { ) error {
fmt.Fprint(wr, "Your Constellation cluster was successfully initialized.\n\n") fmt.Fprint(wr, "Your Constellation cluster was successfully initialized.\n\n")
ownerID := hex.EncodeToString(initResp.GetOwnerId()) ownerID := hex.EncodeToString(initResp.GetOwnerId())
// i.log.Debugf("Owner id is %s", ownerID)
clusterID := hex.EncodeToString(initResp.GetClusterId()) clusterID := hex.EncodeToString(initResp.GetClusterId())
stateFile.SetClusterValues(state.ClusterValues{
MeasurementSalt: measurementSalt,
OwnerID: ownerID,
ClusterID: clusterID,
})
tw := tabwriter.NewWriter(wr, 0, 0, 2, ' ', 0) tw := tabwriter.NewWriter(wr, 0, 0, 2, ' ', 0)
// writeRow(tw, "Constellation cluster's owner identifier", ownerID)
writeRow(tw, "Constellation cluster identifier", clusterID) writeRow(tw, "Constellation cluster identifier", clusterID)
writeRow(tw, "Kubernetes configuration", i.pf.PrefixPrintablePath(constants.AdminConfFilename)) writeRow(tw, "Kubernetes configuration", i.pf.PrefixPrintablePath(constants.AdminConfFilename))
tw.Flush() tw.Flush()
fmt.Fprintln(wr) fmt.Fprintln(wr)
i.log.Debugf("Rewriting cluster server address in kubeconfig to %s", idFile.IP) i.log.Debugf("Rewriting cluster server address in kubeconfig to %s", stateFile.Infrastructure.ClusterEndpoint)
kubeconfig, err := clientcmd.Load(initResp.GetKubeconfig()) kubeconfig, err := clientcmd.Load(initResp.GetKubeconfig())
if err != nil { if err != nil {
return fmt.Errorf("loading kubeconfig: %w", err) return fmt.Errorf("loading kubeconfig: %w", err)
@ -486,7 +474,7 @@ func (i *initCmd) writeOutput(
if err != nil { if err != nil {
return fmt.Errorf("parsing kubeconfig server URL: %w", err) return fmt.Errorf("parsing kubeconfig server URL: %w", err)
} }
kubeEndpoint.Host = net.JoinHostPort(idFile.IP, kubeEndpoint.Port()) kubeEndpoint.Host = net.JoinHostPort(stateFile.Infrastructure.ClusterEndpoint, kubeEndpoint.Port())
cluster.Server = kubeEndpoint.String() cluster.Server = kubeEndpoint.String()
} }
kubeconfigBytes, err := clientcmd.Write(*kubeconfig) kubeconfigBytes, err := clientcmd.Write(*kubeconfig)
@ -508,13 +496,11 @@ func (i *initCmd) writeOutput(
} }
} }
idFile.OwnerID = ownerID if err := stateFile.WriteToFile(i.fileHandler, constants.StateFilename); err != nil {
idFile.ClusterID = clusterID return fmt.Errorf("writing Constellation state file: %w", err)
if err := i.fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptOverwrite); err != nil {
return fmt.Errorf("writing Constellation ID file: %w", err)
} }
i.log.Debugf("Constellation ID file written to %s", i.pf.PrefixPrintablePath(constants.ClusterIDsFilename))
i.log.Debugf("Constellation state file written to %s", i.pf.PrefixPrintablePath(constants.StateFilename))
if !mergeConfig { if !mergeConfig {
fmt.Fprintln(wr, "You can now connect to your cluster by executing:") fmt.Fprintln(wr, "You can now connect to your cluster by executing:")
@ -694,11 +680,7 @@ type attestationConfigApplier interface {
} }
type helmApplier interface { type helmApplier interface {
PrepareApply(conf *config.Config, idFile clusterid.File, PrepareApply(conf *config.Config, stateFile *state.State,
flags helm.Options, infra state.Infrastructure, serviceAccURI string, masterSecret uri.MasterSecret) ( flags helm.Options, serviceAccURI string, masterSecret uri.MasterSecret) (
helm.Applier, bool, error) helm.Applier, bool, error)
} }
type infrastructureShower interface {
ShowInfrastructure(ctx context.Context, provider cloudprovider.Provider) (state.Infrastructure, error)
}

View File

@ -21,7 +21,6 @@ import (
"time" "time"
"github.com/edgelesssys/constellation/v2/bootstrapper/initproto" "github.com/edgelesssys/constellation/v2/bootstrapper/initproto"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix" "github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
"github.com/edgelesssys/constellation/v2/cli/internal/helm" "github.com/edgelesssys/constellation/v2/cli/internal/helm"
"github.com/edgelesssys/constellation/v2/cli/internal/state" "github.com/edgelesssys/constellation/v2/cli/internal/state"
@ -92,7 +91,7 @@ func TestInitialize(t *testing.T) {
testCases := map[string]struct { testCases := map[string]struct {
provider cloudprovider.Provider provider cloudprovider.Provider
idFile *clusterid.File stateFile *state.State
configMutator func(*config.Config) configMutator func(*config.Config)
serviceAccKey *gcpshared.ServiceAccountKey serviceAccKey *gcpshared.ServiceAccountKey
initServerAPI *stubInitServer initServerAPI *stubInitServer
@ -102,32 +101,32 @@ func TestInitialize(t *testing.T) {
}{ }{
"initialize some gcp instances": { "initialize some gcp instances": {
provider: cloudprovider.GCP, provider: cloudprovider.GCP,
idFile: &clusterid.File{IP: "192.0.2.1"}, stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
configMutator: func(c *config.Config) { c.Provider.GCP.ServiceAccountKeyPath = serviceAccPath }, configMutator: func(c *config.Config) { c.Provider.GCP.ServiceAccountKeyPath = serviceAccPath },
serviceAccKey: gcpServiceAccKey, serviceAccKey: gcpServiceAccKey,
initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}}, initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
}, },
"initialize some azure instances": { "initialize some azure instances": {
provider: cloudprovider.Azure, provider: cloudprovider.Azure,
idFile: &clusterid.File{IP: "192.0.2.1"}, stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}}, initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
}, },
"initialize some qemu instances": { "initialize some qemu instances": {
provider: cloudprovider.QEMU, provider: cloudprovider.QEMU,
idFile: &clusterid.File{IP: "192.0.2.1"}, stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}}, initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
}, },
"non retriable error": { "non retriable error": {
provider: cloudprovider.QEMU, provider: cloudprovider.QEMU,
idFile: &clusterid.File{IP: "192.0.2.1"}, stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
initServerAPI: &stubInitServer{initErr: &nonRetriableError{err: assert.AnError}}, initServerAPI: &stubInitServer{initErr: &nonRetriableError{err: assert.AnError}},
retriable: false, retriable: false,
masterSecretShouldExist: true, masterSecretShouldExist: true,
wantErr: true, wantErr: true,
}, },
"non retriable error with failed log collection": { "non retriable error with failed log collection": {
provider: cloudprovider.QEMU, provider: cloudprovider.QEMU,
idFile: &clusterid.File{IP: "192.0.2.1"}, stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
initServerAPI: &stubInitServer{ initServerAPI: &stubInitServer{
res: []*initproto.InitResponse{ res: []*initproto.InitResponse{
{ {
@ -150,28 +149,35 @@ func TestInitialize(t *testing.T) {
masterSecretShouldExist: true, masterSecretShouldExist: true,
wantErr: true, wantErr: true,
}, },
"empty id file": { "state file with only version": {
provider: cloudprovider.GCP, provider: cloudprovider.GCP,
idFile: &clusterid.File{}, stateFile: &state.State{Version: state.Version1},
initServerAPI: &stubInitServer{}, initServerAPI: &stubInitServer{},
retriable: true, retriable: true,
wantErr: true, wantErr: true,
}, },
"no id file": { "empty state file": {
provider: cloudprovider.GCP,
stateFile: &state.State{},
initServerAPI: &stubInitServer{},
retriable: true,
wantErr: true,
},
"no state file": {
provider: cloudprovider.GCP, provider: cloudprovider.GCP,
retriable: true, retriable: true,
wantErr: true, wantErr: true,
}, },
"init call fails": { "init call fails": {
provider: cloudprovider.GCP, provider: cloudprovider.GCP,
idFile: &clusterid.File{IP: "192.0.2.1"}, stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
initServerAPI: &stubInitServer{initErr: assert.AnError}, initServerAPI: &stubInitServer{initErr: assert.AnError},
retriable: true, retriable: true,
wantErr: true, wantErr: true,
}, },
"k8s version without v works": { "k8s version without v works": {
provider: cloudprovider.Azure, provider: cloudprovider.Azure,
idFile: &clusterid.File{IP: "192.0.2.1"}, stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}}, initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
configMutator: func(c *config.Config) { configMutator: func(c *config.Config) {
res, err := versions.NewValidK8sVersion(strings.TrimPrefix(string(versions.Default), "v"), true) res, err := versions.NewValidK8sVersion(strings.TrimPrefix(string(versions.Default), "v"), true)
@ -181,7 +187,7 @@ func TestInitialize(t *testing.T) {
}, },
"outdated k8s patch version doesn't work": { "outdated k8s patch version doesn't work": {
provider: cloudprovider.Azure, provider: cloudprovider.Azure,
idFile: &clusterid.File{IP: "192.0.2.1"}, stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}}, initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
configMutator: func(c *config.Config) { configMutator: func(c *config.Config) {
v, err := semver.New(versions.SupportedK8sVersions()[0]) v, err := semver.New(versions.SupportedK8sVersions()[0])
@ -229,9 +235,10 @@ func TestInitialize(t *testing.T) {
tc.configMutator(config) tc.configMutator(config)
} }
require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, config, file.OptNone)) require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, config, file.OptNone))
if tc.idFile != nil { stateFile := state.New()
tc.idFile.CloudProvider = tc.provider require.NoError(stateFile.WriteToFile(fileHandler, constants.StateFilename))
require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, tc.idFile, file.OptNone)) if tc.stateFile != nil {
require.NoError(tc.stateFile.WriteToFile(fileHandler, constants.StateFilename))
} }
if tc.serviceAccKey != nil { if tc.serviceAccKey != nil {
require.NoError(fileHandler.WriteJSON(serviceAccPath, tc.serviceAccKey, file.OptNone)) require.NoError(fileHandler.WriteJSON(serviceAccPath, tc.serviceAccKey, file.OptNone))
@ -241,11 +248,16 @@ func TestInitialize(t *testing.T) {
ctx, cancel := context.WithTimeout(ctx, 4*time.Second) ctx, cancel := context.WithTimeout(ctx, 4*time.Second)
defer cancel() defer cancel()
cmd.SetContext(ctx) cmd.SetContext(ctx)
i := newInitCmd(&stubShowInfrastructure{}, fileHandler, &nopSpinner{}, nil, logger.NewTest(t)) i := newInitCmd(fileHandler, &nopSpinner{}, nil, logger.NewTest(t))
err := i.initialize(cmd, newDialer, &stubLicenseClient{}, stubAttestationFetcher{}, err := i.initialize(
cmd,
newDialer,
&stubLicenseClient{},
stubAttestationFetcher{},
func(io.Writer, string, debugLog) (attestationConfigApplier, error) { func(io.Writer, string, debugLog) (attestationConfigApplier, error) {
return &stubAttestationApplier{}, nil return &stubAttestationApplier{}, nil
}, func(_ string, _ debugLog) (helmApplier, error) { },
func(_ string, _ debugLog) (helmApplier, error) {
return &stubApplier{}, nil return &stubApplier{}, nil
}) })
@ -277,7 +289,7 @@ type stubApplier struct {
err error err error
} }
func (s stubApplier) PrepareApply(_ *config.Config, _ clusterid.File, _ helm.Options, _ state.Infrastructure, _ string, _ uri.MasterSecret) (helm.Applier, bool, error) { func (s stubApplier) PrepareApply(_ *config.Config, _ *state.State, _ helm.Options, _ string, _ uri.MasterSecret) (helm.Applier, bool, error) {
return stubRunner{}, false, s.err return stubRunner{}, false, s.err
} }
@ -386,26 +398,33 @@ func TestWriteOutput(t *testing.T) {
ownerID := hex.EncodeToString(resp.GetInitSuccess().GetOwnerId()) ownerID := hex.EncodeToString(resp.GetInitSuccess().GetOwnerId())
clusterID := hex.EncodeToString(resp.GetInitSuccess().GetClusterId()) clusterID := hex.EncodeToString(resp.GetInitSuccess().GetClusterId())
measurementSalt := []byte{0x41}
expectedIDFile := clusterid.File{ expectedStateFile := &state.State{
ClusterID: clusterID, Version: state.Version1,
OwnerID: ownerID, ClusterValues: state.ClusterValues{
IP: clusterEndpoint, ClusterID: clusterID,
UID: "test-uid", OwnerID: ownerID,
MeasurementSalt: []byte{0x41},
},
Infrastructure: state.Infrastructure{
APIServerCertSANs: []string{},
InitSecret: []byte{},
ClusterEndpoint: clusterEndpoint,
},
} }
var out bytes.Buffer var out bytes.Buffer
testFs := afero.NewMemMapFs() testFs := afero.NewMemMapFs()
fileHandler := file.NewHandler(testFs) fileHandler := file.NewHandler(testFs)
idFile := clusterid.File{ stateFile := state.New().SetInfrastructure(state.Infrastructure{
UID: "test-uid", ClusterEndpoint: clusterEndpoint,
IP: clusterEndpoint, })
}
i := newInitCmd(nil, fileHandler, &nopSpinner{}, &stubMerger{}, logger.NewTest(t)) i := newInitCmd(fileHandler, &nopSpinner{}, &stubMerger{}, logger.NewTest(t))
err = i.writeOutput(idFile, resp.GetInitSuccess(), false, &out) err = i.writeOutput(stateFile, resp.GetInitSuccess(), false, &out, measurementSalt)
require.NoError(err) require.NoError(err)
// assert.Contains(out.String(), ownerID)
assert.Contains(out.String(), clusterID) assert.Contains(out.String(), clusterID)
assert.Contains(out.String(), constants.AdminConfFilename) assert.Contains(out.String(), constants.AdminConfFilename)
@ -415,20 +434,17 @@ func TestWriteOutput(t *testing.T) {
assert.Contains(string(adminConf), clusterEndpoint) assert.Contains(string(adminConf), clusterEndpoint)
assert.Equal(string(expectedKubeconfigBytes), string(adminConf)) assert.Equal(string(expectedKubeconfigBytes), string(adminConf))
idsFile, err := afs.ReadFile(constants.ClusterIDsFilename) fh := file.NewHandler(afs)
readStateFile, err := state.ReadFromFile(fh, constants.StateFilename)
assert.NoError(err) assert.NoError(err)
var testIDFile clusterid.File assert.Equal(expectedStateFile, readStateFile)
err = json.Unmarshal(idsFile, &testIDFile)
assert.NoError(err)
assert.Equal(expectedIDFile, testIDFile)
out.Reset() out.Reset()
require.NoError(afs.Remove(constants.AdminConfFilename)) require.NoError(afs.Remove(constants.AdminConfFilename))
// test custom workspace // test custom workspace
i.pf = pathprefix.New("/some/path") i.pf = pathprefix.New("/some/path")
err = i.writeOutput(idFile, resp.GetInitSuccess(), true, &out) err = i.writeOutput(stateFile, resp.GetInitSuccess(), true, &out, measurementSalt)
require.NoError(err) require.NoError(err)
// assert.Contains(out.String(), ownerID)
assert.Contains(out.String(), clusterID) assert.Contains(out.String(), clusterID)
assert.Contains(out.String(), i.pf.PrefixPrintablePath(constants.AdminConfFilename)) assert.Contains(out.String(), i.pf.PrefixPrintablePath(constants.AdminConfFilename))
out.Reset() out.Reset()
@ -437,9 +453,8 @@ func TestWriteOutput(t *testing.T) {
i.pf = pathprefix.PathPrefixer{} i.pf = pathprefix.PathPrefixer{}
// test config merging // test config merging
err = i.writeOutput(idFile, resp.GetInitSuccess(), true, &out) err = i.writeOutput(stateFile, resp.GetInitSuccess(), true, &out, measurementSalt)
require.NoError(err) require.NoError(err)
// assert.Contains(out.String(), ownerID)
assert.Contains(out.String(), clusterID) assert.Contains(out.String(), clusterID)
assert.Contains(out.String(), constants.AdminConfFilename) assert.Contains(out.String(), constants.AdminConfFilename)
assert.Contains(out.String(), "Constellation kubeconfig merged with default config") assert.Contains(out.String(), "Constellation kubeconfig merged with default config")
@ -449,9 +464,8 @@ func TestWriteOutput(t *testing.T) {
// test config merging with env vars set // test config merging with env vars set
i.merger = &stubMerger{envVar: "/some/path/to/kubeconfig"} i.merger = &stubMerger{envVar: "/some/path/to/kubeconfig"}
err = i.writeOutput(idFile, resp.GetInitSuccess(), true, &out) err = i.writeOutput(stateFile, resp.GetInitSuccess(), true, &out, measurementSalt)
require.NoError(err) require.NoError(err)
// assert.Contains(out.String(), ownerID)
assert.Contains(out.String(), clusterID) assert.Contains(out.String(), clusterID)
assert.Contains(out.String(), constants.AdminConfFilename) assert.Contains(out.String(), constants.AdminConfFilename)
assert.Contains(out.String(), "Constellation kubeconfig merged with default config") assert.Contains(out.String(), "Constellation kubeconfig merged with default config")
@ -496,7 +510,7 @@ func TestGenerateMasterSecret(t *testing.T) {
require.NoError(tc.createFileFunc(fileHandler)) require.NoError(tc.createFileFunc(fileHandler))
var out bytes.Buffer var out bytes.Buffer
i := newInitCmd(nil, fileHandler, nil, nil, logger.NewTest(t)) i := newInitCmd(fileHandler, nil, nil, logger.NewTest(t))
secret, err := i.generateMasterSecret(&out) secret, err := i.generateMasterSecret(&out)
if tc.wantErr { if tc.wantErr {
@ -530,7 +544,8 @@ func TestAttestation(t *testing.T) {
}, },
}, },
}} }}
existingIDFile := &clusterid.File{IP: "192.0.2.4", CloudProvider: cloudprovider.QEMU}
existingStateFile := &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.4"}}
netDialer := testdialer.NewBufconnDialer() netDialer := testdialer.NewBufconnDialer()
@ -561,7 +576,7 @@ func TestAttestation(t *testing.T) {
fs := afero.NewMemMapFs() fs := afero.NewMemMapFs()
fileHandler := file.NewHandler(fs) fileHandler := file.NewHandler(fs)
require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, existingIDFile, file.OptNone)) require.NoError(existingStateFile.WriteToFile(fileHandler, constants.StateFilename))
cfg := config.Default() cfg := config.Default()
cfg.Image = "v0.0.0" // is the default version of the the CLI (before build injects the real version) cfg.Image = "v0.0.0" // is the default version of the the CLI (before build injects the real version)
@ -588,7 +603,7 @@ func TestAttestation(t *testing.T) {
defer cancel() defer cancel()
cmd.SetContext(ctx) cmd.SetContext(ctx)
i := newInitCmd(nil, fileHandler, &nopSpinner{}, nil, logger.NewTest(t)) i := newInitCmd(fileHandler, &nopSpinner{}, nil, logger.NewTest(t))
err := i.initialize(cmd, newDialer, &stubLicenseClient{}, stubAttestationFetcher{}, err := i.initialize(cmd, newDialer, &stubLicenseClient{}, stubAttestationFetcher{},
func(io.Writer, string, debugLog) (attestationConfigApplier, error) { func(io.Writer, string, debugLog) (attestationConfigApplier, error) {
return &stubAttestationApplier{}, nil return &stubAttestationApplier{}, nil
@ -758,23 +773,10 @@ func (c stubInitClient) Recv() (*initproto.InitResponse, error) {
return res, err return res, err
} }
type stubShowInfrastructure struct{}
func (s *stubShowInfrastructure) ShowInfrastructure(_ context.Context, csp cloudprovider.Provider) (state.Infrastructure, error) {
res := state.Infrastructure{}
switch csp {
case cloudprovider.Azure:
res.Azure = &state.Azure{}
case cloudprovider.GCP:
res.GCP = &state.GCP{}
}
return res, nil
}
type stubAttestationApplier struct { type stubAttestationApplier struct {
applyErr error applyErr error
} }
func (a *stubAttestationApplier) ApplyJoinConfig(_ context.Context, _ config.AttestationCfg, _ []byte) error { func (a *stubAttestationApplier) ApplyJoinConfig(context.Context, config.AttestationCfg, []byte) error {
return a.applyErr return a.applyErr
} }

View File

@ -11,8 +11,7 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid" "github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file" "github.com/edgelesssys/constellation/v2/internal/file"
"github.com/spf13/afero" "github.com/spf13/afero"
@ -44,14 +43,12 @@ func runDown(cmd *cobra.Command, args []string) error {
} }
func checkForMiniCluster(fileHandler file.Handler) error { func checkForMiniCluster(fileHandler file.Handler) error {
var idFile clusterid.File stateFile, err := state.ReadFromFile(fileHandler, constants.StateFilename)
if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil { if err != nil {
return err return fmt.Errorf("reading state file: %w", err)
} }
if idFile.CloudProvider != cloudprovider.QEMU {
return errors.New("cluster is not a QEMU based Constellation") if stateFile.Infrastructure.UID != constants.MiniConstellationUID {
}
if idFile.UID != constants.MiniConstellationUID {
return errors.New("cluster is not a MiniConstellation cluster") return errors.New("cluster is not a MiniConstellation cluster")
} }

View File

@ -19,6 +19,7 @@ import (
"github.com/edgelesssys/constellation/v2/cli/internal/helm" "github.com/edgelesssys/constellation/v2/cli/internal/helm"
"github.com/edgelesssys/constellation/v2/cli/internal/kubecmd" "github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
"github.com/edgelesssys/constellation/v2/cli/internal/libvirt" "github.com/edgelesssys/constellation/v2/cli/internal/libvirt"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/cli/internal/terraform" "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
"github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
"github.com/edgelesssys/constellation/v2/internal/atls" "github.com/edgelesssys/constellation/v2/internal/atls"
@ -172,14 +173,18 @@ func (m *miniUpCmd) createMiniCluster(ctx context.Context, fileHandler file.Hand
TFWorkspace: constants.TerraformWorkingDir, TFWorkspace: constants.TerraformWorkingDir,
TFLogLevel: flags.tfLogLevel, TFLogLevel: flags.tfLogLevel,
} }
idFile, err := creator.Create(ctx, opts) infraState, err := creator.Create(ctx, opts)
if err != nil { if err != nil {
return err return err
} }
idFile.UID = constants.MiniConstellationUID // use UID "mini" to identify MiniConstellation clusters. infraState.UID = constants.MiniConstellationUID // use UID "mini" to identify MiniConstellation clusters.
m.log.Debugf("Cluster id file contains %v", idFile)
return fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptNone) stateFile := state.New().
SetInfrastructure(infraState)
m.log.Debugf("Cluster state file contains %v", stateFile)
return stateFile.WriteToFile(fileHandler, constants.StateFilename)
} }
// initializeMiniCluster initializes a QEMU cluster. // initializeMiniCluster initializes a QEMU cluster.
@ -208,18 +213,13 @@ func (m *miniUpCmd) initializeMiniCluster(cmd *cobra.Command, fileHandler file.H
m.log.Debugf("Created new logger") m.log.Debugf("Created new logger")
defer log.Sync() defer log.Sync()
tfClient, err := terraform.New(cmd.Context(), constants.TerraformWorkingDir)
if err != nil {
return fmt.Errorf("creating Terraform client: %w", err)
}
newAttestationApplier := func(w io.Writer, kubeConfig string, log debugLog) (attestationConfigApplier, error) { newAttestationApplier := func(w io.Writer, kubeConfig string, log debugLog) (attestationConfigApplier, error) {
return kubecmd.New(w, kubeConfig, fileHandler, log) return kubecmd.New(w, kubeConfig, fileHandler, log)
} }
newHelmClient := func(kubeConfigPath string, log debugLog) (helmApplier, error) { newHelmClient := func(kubeConfigPath string, log debugLog) (helmApplier, error) {
return helm.NewClient(kubeConfigPath, log) return helm.NewClient(kubeConfigPath, log)
} // need to defer helm client instantiation until kubeconfig is available } // need to defer helm client instantiation until kubeconfig is available
i := newInitCmd(tfClient, fileHandler, spinner, &kubeconfigMerger{log: log}, log) i := newInitCmd(fileHandler, spinner, &kubeconfigMerger{log: log}, log)
if err := i.initialize(cmd, newDialer, license.NewClient(), m.configFetcher, if err := i.initialize(cmd, newDialer, license.NewClient(), m.configFetcher,
newAttestationApplier, newHelmClient); err != nil { newAttestationApplier, newHelmClient); err != nil {
return err return err

View File

@ -16,15 +16,14 @@ import (
"time" "time"
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd" "github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix" "github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/disk-mapper/recoverproto" "github.com/edgelesssys/constellation/v2/disk-mapper/recoverproto"
"github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
"github.com/edgelesssys/constellation/v2/internal/atls" "github.com/edgelesssys/constellation/v2/internal/atls"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config" "github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/crypto"
"github.com/edgelesssys/constellation/v2/internal/file" "github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/grpc/dialer" "github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
grpcRetry "github.com/edgelesssys/constellation/v2/internal/grpc/retry" grpcRetry "github.com/edgelesssys/constellation/v2/internal/grpc/retry"
@ -225,39 +224,40 @@ func (r *recoverCmd) parseRecoverFlags(cmd *cobra.Command, fileHandler file.Hand
r.log.Debugf("Workspace set to %q", workDir) r.log.Debugf("Workspace set to %q", workDir)
r.pf = pathprefix.New(workDir) r.pf = pathprefix.New(workDir)
var idFile clusterid.File
if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil && !errors.Is(err, afero.ErrFileNotFound) {
return recoverFlags{}, err
}
endpoint, err := cmd.Flags().GetString("endpoint") endpoint, err := cmd.Flags().GetString("endpoint")
r.log.Debugf("Endpoint flag is %s", endpoint) r.log.Debugf("Endpoint flag is %s", endpoint)
if err != nil { if err != nil {
return recoverFlags{}, fmt.Errorf("parsing endpoint argument: %w", err) return recoverFlags{}, fmt.Errorf("parsing endpoint argument: %w", err)
} }
if endpoint == "" {
endpoint = idFile.IP
}
endpoint, err = addPortIfMissing(endpoint, constants.RecoveryPort)
if err != nil {
return recoverFlags{}, fmt.Errorf("validating endpoint argument: %w", err)
}
r.log.Debugf("Endpoint value after parsing is %s", endpoint)
force, err := cmd.Flags().GetBool("force") force, err := cmd.Flags().GetBool("force")
if err != nil { if err != nil {
return recoverFlags{}, fmt.Errorf("parsing force argument: %w", err) return recoverFlags{}, fmt.Errorf("parsing force argument: %w", err)
} }
var attestationURL string
stateFile := state.New()
if endpoint == "" {
stateFile, err = state.ReadFromFile(fileHandler, constants.StateFilename)
if err != nil {
return recoverFlags{}, fmt.Errorf("reading state file: %w", err)
}
endpoint = stateFile.Infrastructure.ClusterEndpoint
}
endpoint, err = addPortIfMissing(endpoint, constants.RecoveryPort)
if err != nil {
return recoverFlags{}, fmt.Errorf("validating endpoint argument: %w", err)
}
r.log.Debugf("Endpoint value after parsing is %s", endpoint)
if stateFile.Infrastructure.Azure != nil {
attestationURL = stateFile.Infrastructure.Azure.AttestationURL
}
return recoverFlags{ return recoverFlags{
endpoint: endpoint, endpoint: endpoint,
maaURL: idFile.AttestationURL, maaURL: attestationURL,
force: force, force: force,
}, nil }, nil
} }
func getStateDiskKeyFunc(masterKey, salt []byte) func(uuid string) ([]byte, error) {
return func(uuid string) ([]byte, error) {
return crypto.DeriveKey(masterKey, salt, []byte(crypto.DEKPrefix+uuid), crypto.StateDiskKeyLength)
}
}

View File

@ -15,12 +15,13 @@ import (
"testing" "testing"
"time" "time"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid" "github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/disk-mapper/recoverproto" "github.com/edgelesssys/constellation/v2/disk-mapper/recoverproto"
"github.com/edgelesssys/constellation/v2/internal/atls" "github.com/edgelesssys/constellation/v2/internal/atls"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config" "github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/crypto"
"github.com/edgelesssys/constellation/v2/internal/crypto/testvector" "github.com/edgelesssys/constellation/v2/internal/crypto/testvector"
"github.com/edgelesssys/constellation/v2/internal/file" "github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/grpc/atlscredentials" "github.com/edgelesssys/constellation/v2/internal/grpc/atlscredentials"
@ -184,16 +185,16 @@ func TestRecover(t *testing.T) {
func TestParseRecoverFlags(t *testing.T) { func TestParseRecoverFlags(t *testing.T) {
testCases := map[string]struct { testCases := map[string]struct {
args []string args []string
wantFlags recoverFlags wantFlags recoverFlags
writeIDFile bool writeStateFile bool
wantErr bool wantErr bool
}{ }{
"no flags": { "no flags": {
wantFlags: recoverFlags{ wantFlags: recoverFlags{
endpoint: "192.0.2.42:9999", endpoint: "192.0.2.42:9999",
}, },
writeIDFile: true, writeStateFile: true,
}, },
"no flags, no ID file": { "no flags, no ID file": {
wantFlags: recoverFlags{ wantFlags: recoverFlags{
@ -224,8 +225,12 @@ func TestParseRecoverFlags(t *testing.T) {
require.NoError(cmd.ParseFlags(tc.args)) require.NoError(cmd.ParseFlags(tc.args))
fileHandler := file.NewHandler(afero.NewMemMapFs()) fileHandler := file.NewHandler(afero.NewMemMapFs())
if tc.writeIDFile { if tc.writeStateFile {
require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, &clusterid.File{IP: "192.0.2.42"})) require.NoError(
state.New().
SetInfrastructure(state.Infrastructure{ClusterEndpoint: "192.0.2.42"}).
WriteToFile(fileHandler, constants.StateFilename),
)
} }
r := &recoverCmd{log: logger.NewTest(t)} r := &recoverCmd{log: logger.NewTest(t)}
flags, err := r.parseRecoverFlags(cmd, fileHandler) flags, err := r.parseRecoverFlags(cmd, fileHandler)
@ -309,6 +314,12 @@ func TestDeriveStateDiskKey(t *testing.T) {
} }
} }
func getStateDiskKeyFunc(masterKey, salt []byte) func(uuid string) ([]byte, error) {
return func(uuid string) ([]byte, error) {
return crypto.DeriveKey(masterKey, salt, []byte(crypto.DEKPrefix+uuid), crypto.StateDiskKeyLength)
}
}
type stubRecoveryServer struct { type stubRecoveryServer struct {
recoverError error recoverError error
recoverproto.UnimplementedAPIServer recoverproto.UnimplementedAPIServer

View File

@ -84,6 +84,7 @@ func terminate(cmd *cobra.Command, terminator cloudTerminator, fileHandler file.
removeErr = errors.Join(err, fmt.Errorf("failed to remove file: '%s', please remove it manually", pf.PrefixPrintablePath(constants.AdminConfFilename))) removeErr = errors.Join(err, fmt.Errorf("failed to remove file: '%s', please remove it manually", pf.PrefixPrintablePath(constants.AdminConfFilename)))
} }
// TODO(msanft): Once v2.12.0 is released, remove the ID-file-removal here.
if err := fileHandler.Remove(constants.ClusterIDsFilename); err != nil && !errors.Is(err, fs.ErrNotExist) { if err := fileHandler.Remove(constants.ClusterIDsFilename); err != nil && !errors.Is(err, fs.ErrNotExist) {
removeErr = errors.Join(err, fmt.Errorf("failed to remove file: '%s', please remove it manually", pf.PrefixPrintablePath(constants.ClusterIDsFilename))) removeErr = errors.Join(err, fmt.Errorf("failed to remove file: '%s', please remove it manually", pf.PrefixPrintablePath(constants.ClusterIDsFilename)))
} }

View File

@ -11,8 +11,7 @@ import (
"errors" "errors"
"testing" "testing"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid" "github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file" "github.com/edgelesssys/constellation/v2/internal/file"
"github.com/spf13/afero" "github.com/spf13/afero"
@ -47,65 +46,64 @@ func TestTerminateCmdArgumentValidation(t *testing.T) {
} }
func TestTerminate(t *testing.T) { func TestTerminate(t *testing.T) {
setupFs := func(require *require.Assertions, idFile clusterid.File) afero.Fs { setupFs := func(require *require.Assertions, stateFile *state.State) afero.Fs {
fs := afero.NewMemMapFs() fs := afero.NewMemMapFs()
fileHandler := file.NewHandler(fs) fileHandler := file.NewHandler(fs)
require.NoError(fileHandler.Write(constants.AdminConfFilename, []byte{1, 2}, file.OptNone)) require.NoError(fileHandler.Write(constants.AdminConfFilename, []byte{1, 2}, file.OptNone))
require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptNone)) require.NoError(stateFile.WriteToFile(fileHandler, constants.StateFilename))
require.NoError(fileHandler.Write(constants.StateFilename, []byte{3, 4}, file.OptNone))
return fs return fs
} }
someErr := errors.New("failed") someErr := errors.New("failed")
testCases := map[string]struct { testCases := map[string]struct {
idFile clusterid.File stateFile *state.State
yesFlag bool yesFlag bool
stdin string stdin string
setupFs func(*require.Assertions, clusterid.File) afero.Fs setupFs func(*require.Assertions, *state.State) afero.Fs
terminator spyCloudTerminator terminator spyCloudTerminator
wantErr bool wantErr bool
wantAbort bool wantAbort bool
}{ }{
"success": { "success": {
idFile: clusterid.File{CloudProvider: cloudprovider.GCP}, stateFile: state.New(),
setupFs: setupFs, setupFs: setupFs,
terminator: &stubCloudTerminator{}, terminator: &stubCloudTerminator{},
yesFlag: true, yesFlag: true,
}, },
"interactive": { "interactive": {
idFile: clusterid.File{CloudProvider: cloudprovider.GCP}, stateFile: state.New(),
setupFs: setupFs, setupFs: setupFs,
terminator: &stubCloudTerminator{}, terminator: &stubCloudTerminator{},
stdin: "yes\n", stdin: "yes\n",
}, },
"interactive abort": { "interactive abort": {
idFile: clusterid.File{CloudProvider: cloudprovider.GCP}, stateFile: state.New(),
setupFs: setupFs, setupFs: setupFs,
terminator: &stubCloudTerminator{}, terminator: &stubCloudTerminator{},
stdin: "no\n", stdin: "no\n",
wantAbort: true, wantAbort: true,
}, },
"files to remove do not exist": { "files to remove do not exist": {
idFile: clusterid.File{CloudProvider: cloudprovider.GCP}, stateFile: state.New(),
setupFs: func(require *require.Assertions, idFile clusterid.File) afero.Fs { setupFs: func(require *require.Assertions, stateFile *state.State) afero.Fs {
fs := afero.NewMemMapFs() fs := afero.NewMemMapFs()
fileHandler := file.NewHandler(fs) fileHandler := file.NewHandler(fs)
require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptNone)) require.NoError(stateFile.WriteToFile(fileHandler, constants.StateFilename))
return fs return fs
}, },
terminator: &stubCloudTerminator{}, terminator: &stubCloudTerminator{},
yesFlag: true, yesFlag: true,
}, },
"terminate error": { "terminate error": {
idFile: clusterid.File{CloudProvider: cloudprovider.GCP}, stateFile: state.New(),
setupFs: setupFs, setupFs: setupFs,
terminator: &stubCloudTerminator{terminateErr: someErr}, terminator: &stubCloudTerminator{terminateErr: someErr},
yesFlag: true, yesFlag: true,
wantErr: true, wantErr: true,
}, },
"missing id file does not error": { "missing id file does not error": {
idFile: clusterid.File{CloudProvider: cloudprovider.GCP}, stateFile: state.New(),
setupFs: func(require *require.Assertions, idFile clusterid.File) afero.Fs { setupFs: func(require *require.Assertions, stateFile *state.State) afero.Fs {
fs := afero.NewMemMapFs() fs := afero.NewMemMapFs()
fileHandler := file.NewHandler(fs) fileHandler := file.NewHandler(fs)
require.NoError(fileHandler.Write(constants.AdminConfFilename, []byte{1, 2}, file.OptNone)) require.NoError(fileHandler.Write(constants.AdminConfFilename, []byte{1, 2}, file.OptNone))
@ -115,9 +113,9 @@ func TestTerminate(t *testing.T) {
yesFlag: true, yesFlag: true,
}, },
"remove file fails": { "remove file fails": {
idFile: clusterid.File{CloudProvider: cloudprovider.GCP}, stateFile: state.New(),
setupFs: func(require *require.Assertions, idFile clusterid.File) afero.Fs { setupFs: func(require *require.Assertions, stateFile *state.State) afero.Fs {
fs := setupFs(require, idFile) fs := setupFs(require, stateFile)
return afero.NewReadOnlyFs(fs) return afero.NewReadOnlyFs(fs)
}, },
terminator: &stubCloudTerminator{}, terminator: &stubCloudTerminator{},
@ -141,7 +139,7 @@ func TestTerminate(t *testing.T) {
cmd.Flags().String("workspace", "", "") cmd.Flags().String("workspace", "", "")
require.NotNil(tc.setupFs) require.NotNil(tc.setupFs)
fileHandler := file.NewHandler(tc.setupFs(require, tc.idFile)) fileHandler := file.NewHandler(tc.setupFs(require, tc.stateFile))
if tc.yesFlag { if tc.yesFlag {
require.NoError(cmd.Flags().Set("yes", "true")) require.NoError(cmd.Flags().Set("yes", "true"))
@ -159,8 +157,6 @@ func TestTerminate(t *testing.T) {
assert.True(tc.terminator.Called()) assert.True(tc.terminator.Called())
_, err = fileHandler.Stat(constants.AdminConfFilename) _, err = fileHandler.Stat(constants.AdminConfFilename)
assert.Error(err) assert.Error(err)
_, err = fileHandler.Stat(constants.ClusterIDsFilename)
assert.Error(err)
_, err = fileHandler.Stat(constants.StateFilename) _, err = fileHandler.Stat(constants.StateFilename)
assert.Error(err) assert.Error(err)
} }

View File

@ -11,6 +11,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/fs"
"path/filepath" "path/filepath"
"strings" "strings"
"time" "time"
@ -145,6 +146,10 @@ type upgradeApplyCmd struct {
log debugLog log debugLog
} }
type infrastructureShower interface {
ShowInfrastructure(ctx context.Context, provider cloudprovider.Provider) (state.Infrastructure, error)
}
func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, upgradeDir string, flags upgradeApplyFlags) error { func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, upgradeDir string, flags upgradeApplyFlags) error {
conf, err := config.New(u.fileHandler, constants.ConfigFilename, u.configFetcher, flags.force) conf, err := config.New(u.fileHandler, constants.ConfigFilename, u.configFetcher, flags.force)
var configValidationErr *config.ValidationError var configValidationErr *config.ValidationError
@ -172,11 +177,24 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, upgradeDir string, fl
return err return err
} }
var idFile clusterid.File stateFile, err := state.ReadFromFile(u.fileHandler, constants.StateFilename)
if err := u.fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil { // TODO(msanft): Remove reading from idFile once v2.12.0 is released and read from state file directly.
return fmt.Errorf("reading cluster ID file: %w", err) // For now, this is only here to ensure upgradability from an id-file to a state file version.
if errors.Is(err, fs.ErrNotExist) {
u.log.Debugf("%s does not exist in current directory, falling back to reading from %s",
constants.StateFilename, constants.ClusterIDsFilename)
var idFile clusterid.File
if err := u.fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil {
return fmt.Errorf("reading cluster ID file: %w", err)
}
// Convert id-file to state file
stateFile = state.NewFromIDFile(idFile, conf)
if stateFile.Infrastructure.Azure != nil {
conf.UpdateMAAURL(stateFile.Infrastructure.Azure.AttestationURL)
}
} else if err != nil {
return fmt.Errorf("reading state file: %w", err)
} }
conf.UpdateMAAURL(idFile.AttestationURL)
// Apply migrations necessary for the upgrade // Apply migrations necessary for the upgrade
if err := migrateFrom2_10(cmd.Context(), u.kubeUpgrader); err != nil { if err := migrateFrom2_10(cmd.Context(), u.kubeUpgrader); err != nil {
@ -186,37 +204,55 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, upgradeDir string, fl
return fmt.Errorf("applying migration for upgrading from v2.11: %w", err) return fmt.Errorf("applying migration for upgrading from v2.11: %w", err)
} }
if err := u.confirmAndUpgradeAttestationConfig(cmd, conf.GetAttestationConfig(), idFile.MeasurementSalt, flags); err != nil { if err := u.confirmAndUpgradeAttestationConfig(cmd, conf.GetAttestationConfig(), stateFile.ClusterValues.MeasurementSalt, flags); err != nil {
return fmt.Errorf("upgrading measurements: %w", err) return fmt.Errorf("upgrading measurements: %w", err)
} }
var infraState state.Infrastructure // If infrastructure phase is skipped, we expect the new infrastructure
// to be in the Terraform configuration already. Otherwise, perform
// the Terraform migrations.
var postMigrationInfraState state.Infrastructure
if flags.skipPhases.contains(skipInfrastructurePhase) { if flags.skipPhases.contains(skipInfrastructurePhase) {
infraState, err = u.clusterShower.ShowInfrastructure(cmd.Context(), conf.GetProvider()) // TODO(msanft): Once v2.12.0 is released, this should be removed and the state should be read
// from the state file instead, as it will be the only source of truth for the cluster's infrastructure.
postMigrationInfraState, err = u.clusterShower.ShowInfrastructure(cmd.Context(), conf.GetProvider())
if err != nil { if err != nil {
return fmt.Errorf("getting infra state: %w", err) return fmt.Errorf("getting Terraform state: %w", err)
} }
} else { } else {
infraState, err = u.migrateTerraform(cmd, conf, upgradeDir, flags) postMigrationInfraState, err = u.migrateTerraform(cmd, conf, upgradeDir, flags)
if err != nil { if err != nil {
return fmt.Errorf("performing Terraform migrations: %w", err) return fmt.Errorf("performing Terraform migrations: %w", err)
} }
} }
// reload idFile after terraform migration
// it might have been updated by the migration // Merge the pre-upgrade state with the post-migration infrastructure values
if err := u.fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil { if _, err := stateFile.Merge(
return fmt.Errorf("reading updated cluster ID file: %w", err) // temporary state with post-migration infrastructure values
state.New().SetInfrastructure(postMigrationInfraState),
); err != nil {
return fmt.Errorf("merging pre-upgrade state with post-migration infrastructure values: %w", err)
} }
state := state.NewState(infraState)
// TODO(elchead): AB#3424 move this to updateClusterIDFile and correctly handle existing state when writing state // Write the post-migration state to disk
if err := u.fileHandler.WriteYAML(constants.StateFilename, state, file.OptOverwrite); err != nil { if err := stateFile.WriteToFile(u.fileHandler, constants.StateFilename); err != nil {
return fmt.Errorf("writing state file: %w", err) return fmt.Errorf("writing state file: %w", err)
} }
// TODO(msanft): Remove this after v2.12.0 is released, as we do not support
// the id-file starting from v2.13.0.
err = u.fileHandler.RenameFile(constants.ClusterIDsFilename, constants.ClusterIDsFilename+".old")
if !errors.Is(err, fs.ErrNotExist) && err != nil {
return fmt.Errorf("removing cluster ID file: %w", err)
}
// extend the clusterConfig cert SANs with any of the supported endpoints: // extend the clusterConfig cert SANs with any of the supported endpoints:
// - (legacy) public IP // - (legacy) public IP
// - fallback endpoint // - fallback endpoint
// - custom (user-provided) endpoint // - custom (user-provided) endpoint
sans := append([]string{idFile.IP, conf.CustomEndpoint}, idFile.APIServerCertSANs...) // TODO(msanft): Remove the comment below once v2.12.0 is released.
// At this point, state file and id-file should have been merged, so we can use the state file.
sans := append([]string{stateFile.Infrastructure.ClusterEndpoint, conf.CustomEndpoint}, stateFile.Infrastructure.APIServerCertSANs...)
if err := u.kubeUpgrader.ExtendClusterConfigCertSANs(cmd.Context(), sans); err != nil { if err := u.kubeUpgrader.ExtendClusterConfigCertSANs(cmd.Context(), sans); err != nil {
return fmt.Errorf("extending cert SANs: %w", err) return fmt.Errorf("extending cert SANs: %w", err)
} }
@ -228,7 +264,7 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, upgradeDir string, fl
var upgradeErr *compatibility.InvalidUpgradeError var upgradeErr *compatibility.InvalidUpgradeError
if !flags.skipPhases.contains(skipHelmPhase) { if !flags.skipPhases.contains(skipHelmPhase) {
err = u.handleServiceUpgrade(cmd, conf, idFile, infraState, upgradeDir, flags) err = u.handleServiceUpgrade(cmd, conf, stateFile, upgradeDir, flags)
switch { switch {
case errors.As(err, &upgradeErr): case errors.As(err, &upgradeErr):
cmd.PrintErrln(err) cmd.PrintErrln(err)
@ -269,14 +305,16 @@ func diffAttestationCfg(currentAttestationCfg config.AttestationCfg, newAttestat
} }
// migrateTerraform checks if the Constellation version the cluster is being upgraded to requires a migration // migrateTerraform checks if the Constellation version the cluster is being upgraded to requires a migration
// of cloud resources with Terraform. If so, the migration is performed. // of cloud resources with Terraform. If so, the migration is performed and the post-migration infrastructure state is returned.
func (u *upgradeApplyCmd) migrateTerraform(cmd *cobra.Command, conf *config.Config, upgradeDir string, flags upgradeApplyFlags, // If no migration is required, the current (pre-upgrade) infrastructure state is returned.
) (res state.Infrastructure, err error) { func (u *upgradeApplyCmd) migrateTerraform(
cmd *cobra.Command, conf *config.Config, upgradeDir string, flags upgradeApplyFlags,
) (state.Infrastructure, error) {
u.log.Debugf("Planning Terraform migrations") u.log.Debugf("Planning Terraform migrations")
vars, err := cloudcmd.TerraformUpgradeVars(conf) vars, err := cloudcmd.TerraformUpgradeVars(conf)
if err != nil { if err != nil {
return res, fmt.Errorf("parsing upgrade variables: %w", err) return state.Infrastructure{}, fmt.Errorf("parsing upgrade variables: %w", err)
} }
u.log.Debugf("Using Terraform variables:\n%v", vars) u.log.Debugf("Using Terraform variables:\n%v", vars)
@ -292,60 +330,46 @@ func (u *upgradeApplyCmd) migrateTerraform(cmd *cobra.Command, conf *config.Conf
hasDiff, err := u.clusterUpgrader.PlanClusterUpgrade(cmd.Context(), cmd.OutOrStdout(), vars, conf.GetProvider()) hasDiff, err := u.clusterUpgrader.PlanClusterUpgrade(cmd.Context(), cmd.OutOrStdout(), vars, conf.GetProvider())
if err != nil { if err != nil {
return res, fmt.Errorf("planning terraform migrations: %w", err) return state.Infrastructure{}, fmt.Errorf("planning terraform migrations: %w", err)
} }
if !hasDiff {
if hasDiff {
// If there are any Terraform migrations to apply, ask for confirmation
fmt.Fprintln(cmd.OutOrStdout(), "The upgrade requires a migration of Constellation cloud resources by applying an updated Terraform template. Please manually review the suggested changes below.")
if !flags.yes {
ok, err := askToConfirm(cmd, "Do you want to apply the Terraform migrations?")
if err != nil {
return res, fmt.Errorf("asking for confirmation: %w", err)
}
if !ok {
cmd.Println("Aborting upgrade.")
// User doesn't expect to see any changes in his workspace after aborting an "upgrade apply",
// therefore, roll back to the backed up state.
if err := u.clusterUpgrader.RestoreClusterWorkspace(); err != nil {
return res, fmt.Errorf(
"restoring Terraform workspace: %w, restore the Terraform workspace manually from %s ",
err,
filepath.Join(upgradeDir, constants.TerraformUpgradeBackupDir),
)
}
return res, fmt.Errorf("cluster upgrade aborted by user")
}
}
u.log.Debugf("Applying Terraform migrations")
infraState, err := u.clusterUpgrader.ApplyClusterUpgrade(cmd.Context(), conf.GetProvider())
if err != nil {
return infraState, fmt.Errorf("applying terraform migrations: %w", err)
}
// Apply possible updates to cluster ID file
if err := updateClusterIDFile(infraState, u.fileHandler); err != nil {
return infraState, fmt.Errorf("merging cluster ID files: %w", err)
}
cmd.Printf("Terraform migrations applied successfully and output written to: %s\n"+
"A backup of the pre-upgrade state has been written to: %s\n",
flags.pf.PrefixPrintablePath(constants.ClusterIDsFilename),
flags.pf.PrefixPrintablePath(filepath.Join(upgradeDir, constants.TerraformUpgradeBackupDir)),
)
} else {
u.log.Debugf("No Terraform diff detected") u.log.Debugf("No Terraform diff detected")
return u.clusterShower.ShowInfrastructure(cmd.Context(), conf.GetProvider())
} }
u.log.Debugf("No Terraform diff detected")
infraState, err := u.clusterShower.ShowInfrastructure(cmd.Context(), conf.GetProvider()) // If there are any Terraform migrations to apply, ask for confirmation
fmt.Fprintln(cmd.OutOrStdout(), "The upgrade requires a migration of Constellation cloud resources by applying an updated Terraform template. Please manually review the suggested changes below.")
if !flags.yes {
ok, err := askToConfirm(cmd, "Do you want to apply the Terraform migrations?")
if err != nil {
return state.Infrastructure{}, fmt.Errorf("asking for confirmation: %w", err)
}
if !ok {
cmd.Println("Aborting upgrade.")
// User doesn't expect to see any changes in his workspace after aborting an "upgrade apply",
// therefore, roll back to the backed up state.
if err := u.clusterUpgrader.RestoreClusterWorkspace(); err != nil {
return state.Infrastructure{}, fmt.Errorf(
"restoring Terraform workspace: %w, restore the Terraform workspace manually from %s ",
err,
filepath.Join(upgradeDir, constants.TerraformUpgradeBackupDir),
)
}
return state.Infrastructure{}, fmt.Errorf("cluster upgrade aborted by user")
}
}
u.log.Debugf("Applying Terraform migrations")
infraState, err := u.clusterUpgrader.ApplyClusterUpgrade(cmd.Context(), conf.GetProvider())
if err != nil { if err != nil {
return infraState, fmt.Errorf("getting Terraform output: %w", err) return state.Infrastructure{}, fmt.Errorf("applying terraform migrations: %w", err)
}
state := state.NewState(infraState)
// TODO(elchead): AB#3424 move this to updateClusterIDFile and correctly handle existing state when writing state
if err := u.fileHandler.WriteYAML(constants.StateFilename, state, file.OptOverwrite); err != nil {
return infraState, fmt.Errorf("writing state file: %w", err)
} }
cmd.Printf("Infrastructure migrations applied successfully and output written to: %s\n"+
"A backup of the pre-upgrade state has been written to: %s\n",
flags.pf.PrefixPrintablePath(constants.StateFilename),
flags.pf.PrefixPrintablePath(filepath.Join(upgradeDir, constants.TerraformUpgradeBackupDir)),
)
return infraState, nil return infraState, nil
} }
@ -408,12 +432,12 @@ func (u *upgradeApplyCmd) confirmAndUpgradeAttestationConfig(
if err := u.kubeUpgrader.ApplyJoinConfig(cmd.Context(), newConfig, measurementSalt); err != nil { if err := u.kubeUpgrader.ApplyJoinConfig(cmd.Context(), newConfig, measurementSalt); err != nil {
return fmt.Errorf("updating attestation config: %w", err) return fmt.Errorf("updating attestation config: %w", err)
} }
cmd.Println("Successfully update the cluster's attestation config") cmd.Println("Successfully updated the cluster's attestation config")
return nil return nil
} }
func (u *upgradeApplyCmd) handleServiceUpgrade( func (u *upgradeApplyCmd) handleServiceUpgrade(
cmd *cobra.Command, conf *config.Config, idFile clusterid.File, infra state.Infrastructure, cmd *cobra.Command, conf *config.Config, stateFile *state.State,
upgradeDir string, flags upgradeApplyFlags, upgradeDir string, flags upgradeApplyFlags,
) error { ) error {
var secret uri.MasterSecret var secret uri.MasterSecret
@ -432,8 +456,7 @@ func (u *upgradeApplyCmd) handleServiceUpgrade(
prepareApply := func(allowDestructive bool) (helm.Applier, bool, error) { prepareApply := func(allowDestructive bool) (helm.Applier, bool, error) {
options.AllowDestructive = allowDestructive options.AllowDestructive = allowDestructive
executor, includesUpgrades, err := u.helmApplier.PrepareApply(conf, idFile, options, executor, includesUpgrades, err := u.helmApplier.PrepareApply(conf, stateFile, options, serviceAccURI, secret)
infra, serviceAccURI, secret)
var upgradeErr *compatibility.InvalidUpgradeError var upgradeErr *compatibility.InvalidUpgradeError
switch { switch {
case errors.As(err, &upgradeErr): case errors.As(err, &upgradeErr):
@ -587,29 +610,6 @@ func parseUpgradeApplyFlags(cmd *cobra.Command) (upgradeApplyFlags, error) {
}, nil }, nil
} }
func updateClusterIDFile(infraState state.Infrastructure, fileHandler file.Handler) error {
newIDFile := clusterid.File{
InitSecret: []byte(infraState.InitSecret),
IP: infraState.ClusterEndpoint,
APIServerCertSANs: infraState.APIServerCertSANs,
UID: infraState.UID,
}
if infraState.Azure != nil {
newIDFile.AttestationURL = infraState.Azure.AttestationURL
}
idFile := &clusterid.File{}
if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, idFile); err != nil {
return fmt.Errorf("reading %s: %w", constants.ClusterIDsFilename, err)
}
if err := fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile.Merge(newIDFile), file.OptOverwrite); err != nil {
return fmt.Errorf("writing %s: %w", constants.ClusterIDsFilename, err)
}
return nil
}
type upgradeApplyFlags struct { type upgradeApplyFlags struct {
pf pathprefix.PathPrefixer pf pathprefix.PathPrefixer
yes bool yes bool

View File

@ -34,75 +34,151 @@ import (
) )
func TestUpgradeApply(t *testing.T) { func TestUpgradeApply(t *testing.T) {
defaultState := state.New().
SetInfrastructure(state.Infrastructure{
APIServerCertSANs: []string{},
UID: "uid",
Name: "kubernetes-uid", // default test cfg uses "kubernetes" prefix
InitSecret: []byte{0x42},
}).
SetClusterValues(state.ClusterValues{MeasurementSalt: []byte{0x41}})
defaultIDFile := clusterid.File{
MeasurementSalt: []byte{0x41},
UID: "uid",
InitSecret: []byte{0x42},
}
fsWithIDFile := func() file.Handler {
fh := file.NewHandler(afero.NewMemMapFs())
require.NoError(t, fh.WriteJSON(constants.ClusterIDsFilename, defaultIDFile))
return fh
}
fsWithStateFile := func() file.Handler {
fh := file.NewHandler(afero.NewMemMapFs())
require.NoError(t, fh.WriteYAML(constants.StateFilename, defaultState))
return fh
}
testCases := map[string]struct { testCases := map[string]struct {
helmUpgrader helmApplier helmUpgrader helmApplier
kubeUpgrader *stubKubernetesUpgrader kubeUpgrader *stubKubernetesUpgrader
terraformUpgrader clusterUpgrader fh func() file.Handler
wantErr bool fhAssertions func(require *require.Assertions, assert *assert.Assertions, fh file.Handler)
customK8sVersion string terraformUpgrader clusterUpgrader
flags upgradeApplyFlags infrastructureShower *stubShowInfrastructure
stdin string wantErr bool
customK8sVersion string
flags upgradeApplyFlags
stdin string
}{ }{
"success": { "success": {
kubeUpgrader: &stubKubernetesUpgrader{currentConfig: config.DefaultForAzureSEVSNP()}, kubeUpgrader: &stubKubernetesUpgrader{currentConfig: config.DefaultForAzureSEVSNP()},
helmUpgrader: stubApplier{}, helmUpgrader: stubApplier{},
terraformUpgrader: &stubTerraformUpgrader{}, terraformUpgrader: &stubTerraformUpgrader{},
flags: upgradeApplyFlags{yes: true}, flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
fhAssertions: func(require *require.Assertions, assert *assert.Assertions, fh file.Handler) {
gotState, err := state.ReadFromFile(fh, constants.StateFilename)
require.NoError(err)
assert.Equal("v1", gotState.Version)
assert.Equal(defaultState, gotState)
},
},
"fall back to id file": {
kubeUpgrader: &stubKubernetesUpgrader{currentConfig: config.DefaultForAzureSEVSNP()},
helmUpgrader: stubApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithIDFile,
fhAssertions: func(require *require.Assertions, assert *assert.Assertions, fh file.Handler) {
gotState, err := state.ReadFromFile(fh, constants.StateFilename)
require.NoError(err)
assert.Equal("v1", gotState.Version)
assert.Equal(defaultState, gotState)
var oldIDFile clusterid.File
err = fh.ReadJSON(constants.ClusterIDsFilename+".old", &oldIDFile)
assert.NoError(err)
assert.Equal(defaultIDFile, oldIDFile)
},
},
"id file and state file do not exist": {
kubeUpgrader: &stubKubernetesUpgrader{currentConfig: config.DefaultForAzureSEVSNP()},
helmUpgrader: stubApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: func() file.Handler {
return file.NewHandler(afero.NewMemMapFs())
},
wantErr: true,
}, },
"nodeVersion some error": { "nodeVersion some error": {
kubeUpgrader: &stubKubernetesUpgrader{ kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(), currentConfig: config.DefaultForAzureSEVSNP(),
nodeVersionErr: assert.AnError, nodeVersionErr: assert.AnError,
}, },
helmUpgrader: stubApplier{}, helmUpgrader: stubApplier{},
terraformUpgrader: &stubTerraformUpgrader{}, terraformUpgrader: &stubTerraformUpgrader{},
wantErr: true, wantErr: true,
flags: upgradeApplyFlags{yes: true}, flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
}, },
"nodeVersion in progress error": { "nodeVersion in progress error": {
kubeUpgrader: &stubKubernetesUpgrader{ kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(), currentConfig: config.DefaultForAzureSEVSNP(),
nodeVersionErr: kubecmd.ErrInProgress, nodeVersionErr: kubecmd.ErrInProgress,
}, },
helmUpgrader: stubApplier{}, helmUpgrader: stubApplier{},
terraformUpgrader: &stubTerraformUpgrader{}, terraformUpgrader: &stubTerraformUpgrader{},
flags: upgradeApplyFlags{yes: true}, flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
}, },
"helm other error": { "helm other error": {
kubeUpgrader: &stubKubernetesUpgrader{ kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(), currentConfig: config.DefaultForAzureSEVSNP(),
}, },
helmUpgrader: stubApplier{err: assert.AnError}, helmUpgrader: stubApplier{err: assert.AnError},
terraformUpgrader: &stubTerraformUpgrader{}, terraformUpgrader: &stubTerraformUpgrader{},
wantErr: true, wantErr: true,
flags: upgradeApplyFlags{yes: true}, flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
}, },
"abort": { "abort": {
kubeUpgrader: &stubKubernetesUpgrader{ kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(), currentConfig: config.DefaultForAzureSEVSNP(),
}, },
helmUpgrader: stubApplier{}, helmUpgrader: stubApplier{},
terraformUpgrader: &stubTerraformUpgrader{terraformDiff: true}, terraformUpgrader: &stubTerraformUpgrader{terraformDiff: true},
wantErr: true, wantErr: true,
stdin: "no\n", stdin: "no\n",
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
}, },
"abort, restore terraform err": { "abort, restore terraform err": {
kubeUpgrader: &stubKubernetesUpgrader{ kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(), currentConfig: config.DefaultForAzureSEVSNP(),
}, },
helmUpgrader: stubApplier{}, helmUpgrader: stubApplier{},
terraformUpgrader: &stubTerraformUpgrader{terraformDiff: true, rollbackWorkspaceErr: assert.AnError}, terraformUpgrader: &stubTerraformUpgrader{terraformDiff: true, rollbackWorkspaceErr: assert.AnError},
wantErr: true, wantErr: true,
stdin: "no\n", stdin: "no\n",
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
}, },
"plan terraform error": { "plan terraform error": {
kubeUpgrader: &stubKubernetesUpgrader{ kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(), currentConfig: config.DefaultForAzureSEVSNP(),
}, },
helmUpgrader: stubApplier{}, helmUpgrader: stubApplier{},
terraformUpgrader: &stubTerraformUpgrader{planTerraformErr: assert.AnError}, terraformUpgrader: &stubTerraformUpgrader{planTerraformErr: assert.AnError},
wantErr: true, wantErr: true,
flags: upgradeApplyFlags{yes: true}, flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
}, },
"apply terraform error": { "apply terraform error": {
kubeUpgrader: &stubKubernetesUpgrader{ kubeUpgrader: &stubKubernetesUpgrader{
@ -113,8 +189,10 @@ func TestUpgradeApply(t *testing.T) {
applyTerraformErr: assert.AnError, applyTerraformErr: assert.AnError,
terraformDiff: true, terraformDiff: true,
}, },
wantErr: true, wantErr: true,
flags: upgradeApplyFlags{yes: true}, flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
}, },
"outdated K8s patch version": { "outdated K8s patch version": {
kubeUpgrader: &stubKubernetesUpgrader{ kubeUpgrader: &stubKubernetesUpgrader{
@ -127,18 +205,21 @@ func TestUpgradeApply(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
return semver.NewFromInt(v.Major(), v.Minor(), v.Patch()-1, "").String() return semver.NewFromInt(v.Major(), v.Minor(), v.Patch()-1, "").String()
}(), }(),
flags: upgradeApplyFlags{yes: true}, flags: upgradeApplyFlags{yes: true},
wantErr: false, infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
}, },
"outdated K8s version": { "outdated K8s version": {
kubeUpgrader: &stubKubernetesUpgrader{ kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(), currentConfig: config.DefaultForAzureSEVSNP(),
}, },
helmUpgrader: stubApplier{}, helmUpgrader: stubApplier{},
terraformUpgrader: &stubTerraformUpgrader{}, terraformUpgrader: &stubTerraformUpgrader{},
customK8sVersion: "v1.20.0", customK8sVersion: "v1.20.0",
flags: upgradeApplyFlags{yes: true}, flags: upgradeApplyFlags{yes: true},
wantErr: true, wantErr: true,
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
}, },
"skip all upgrade phases": { "skip all upgrade phases": {
kubeUpgrader: &stubKubernetesUpgrader{ kubeUpgrader: &stubKubernetesUpgrader{
@ -150,6 +231,24 @@ func TestUpgradeApply(t *testing.T) {
skipPhases: []skipPhase{skipInfrastructurePhase, skipHelmPhase, skipK8sPhase, skipImagePhase}, skipPhases: []skipPhase{skipInfrastructurePhase, skipHelmPhase, skipK8sPhase, skipImagePhase},
yes: true, yes: true,
}, },
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
},
"show state err": {
kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
},
helmUpgrader: &stubApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
flags: upgradeApplyFlags{
skipPhases: []skipPhase{skipInfrastructurePhase},
yes: true,
},
infrastructureShower: &stubShowInfrastructure{
showInfraErr: assert.AnError,
},
wantErr: true,
fh: fsWithStateFile,
}, },
"skip all phases except node upgrade": { "skip all phases except node upgrade": {
kubeUpgrader: &stubKubernetesUpgrader{ kubeUpgrader: &stubKubernetesUpgrader{
@ -161,6 +260,8 @@ func TestUpgradeApply(t *testing.T) {
skipPhases: []skipPhase{skipInfrastructurePhase, skipHelmPhase, skipK8sPhase}, skipPhases: []skipPhase{skipInfrastructurePhase, skipHelmPhase, skipK8sPhase},
yes: true, yes: true,
}, },
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
}, },
} }
@ -171,15 +272,13 @@ func TestUpgradeApply(t *testing.T) {
cmd := newUpgradeApplyCmd() cmd := newUpgradeApplyCmd()
cmd.SetIn(bytes.NewBufferString(tc.stdin)) cmd.SetIn(bytes.NewBufferString(tc.stdin))
handler := file.NewHandler(afero.NewMemMapFs())
cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), cloudprovider.Azure) cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), cloudprovider.Azure)
if tc.customK8sVersion != "" { if tc.customK8sVersion != "" {
cfg.KubernetesVersion = versions.ValidK8sVersion(tc.customK8sVersion) cfg.KubernetesVersion = versions.ValidK8sVersion(tc.customK8sVersion)
} }
require.NoError(handler.WriteYAML(constants.ConfigFilename, cfg)) fh := tc.fh()
require.NoError(handler.WriteJSON(constants.ClusterIDsFilename, clusterid.File{MeasurementSalt: []byte("measurementSalt")})) require.NoError(fh.WriteYAML(constants.ConfigFilename, cfg))
require.NoError(handler.WriteJSON(constants.MasterSecretFilename, uri.MasterSecret{})) require.NoError(fh.WriteJSON(constants.MasterSecretFilename, uri.MasterSecret{}))
upgrader := upgradeApplyCmd{ upgrader := upgradeApplyCmd{
kubeUpgrader: tc.kubeUpgrader, kubeUpgrader: tc.kubeUpgrader,
@ -187,8 +286,8 @@ func TestUpgradeApply(t *testing.T) {
clusterUpgrader: tc.terraformUpgrader, clusterUpgrader: tc.terraformUpgrader,
log: logger.NewTest(t), log: logger.NewTest(t),
configFetcher: stubAttestationFetcher{}, configFetcher: stubAttestationFetcher{},
clusterShower: &stubShowInfrastructure{}, clusterShower: tc.infrastructureShower,
fileHandler: handler, fileHandler: fh,
} }
err := upgrader.upgradeApply(cmd, "test", tc.flags) err := upgrader.upgradeApply(cmd, "test", tc.flags)
@ -200,14 +299,9 @@ func TestUpgradeApply(t *testing.T) {
assert.Equal(!tc.flags.skipPhases.contains(skipImagePhase), tc.kubeUpgrader.calledNodeUpgrade, assert.Equal(!tc.flags.skipPhases.contains(skipImagePhase), tc.kubeUpgrader.calledNodeUpgrade,
"incorrect node upgrade skipping behavior") "incorrect node upgrade skipping behavior")
var gotState state.State if tc.fhAssertions != nil {
expectedState := state.Infrastructure{ tc.fhAssertions(require, assert, fh)
APIServerCertSANs: []string{},
Azure: &state.Azure{},
} }
require.NoError(handler.ReadYAML(constants.StateFilename, &gotState))
assert.Equal("v1", gotState.Version)
assert.Equal(expectedState, gotState.Infrastructure)
}) })
} }
} }
@ -308,9 +402,17 @@ type mockApplier struct {
mock.Mock mock.Mock
} }
func (m *mockApplier) PrepareApply(cfg *config.Config, clusterID clusterid.File, func (m *mockApplier) PrepareApply(cfg *config.Config, stateFile *state.State,
helmOpts helm.Options, infraState state.Infrastructure, str string, masterSecret uri.MasterSecret, helmOpts helm.Options, str string, masterSecret uri.MasterSecret,
) (helm.Applier, bool, error) { ) (helm.Applier, bool, error) {
args := m.Called(cfg, clusterID, helmOpts, infraState, str, masterSecret) args := m.Called(cfg, stateFile, helmOpts, str, masterSecret)
return args.Get(0).(helm.Applier), args.Bool(1), args.Error(2) return args.Get(0).(helm.Applier), args.Bool(1), args.Error(2)
} }
type stubShowInfrastructure struct {
showInfraErr error
}
func (s *stubShowInfrastructure) ShowInfrastructure(context.Context, cloudprovider.Provider) (state.Infrastructure, error) {
return state.Infrastructure{}, s.showInfraErr
}

View File

@ -26,8 +26,8 @@ import (
tpmProto "github.com/google/go-tpm-tools/proto/tpm" tpmProto "github.com/google/go-tpm-tools/proto/tpm"
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd" "github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix" "github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
"github.com/edgelesssys/constellation/v2/internal/atls" "github.com/edgelesssys/constellation/v2/internal/atls"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements" "github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
@ -54,7 +54,7 @@ func NewVerifyCmd() *cobra.Command {
Use: "verify", Use: "verify",
Short: "Verify the confidential properties of a Constellation cluster", Short: "Verify the confidential properties of a Constellation cluster",
Long: "Verify the confidential properties of a Constellation cluster.\n" + Long: "Verify the confidential properties of a Constellation cluster.\n" +
"If arguments aren't specified, values are read from `" + constants.ClusterIDsFilename + "`.", "If arguments aren't specified, values are read from `" + constants.StateFilename + "`.",
Args: cobra.ExactArgs(0), Args: cobra.ExactArgs(0),
RunE: runVerify, RunE: runVerify,
} }
@ -204,27 +204,36 @@ func (c *verifyCmd) parseVerifyFlags(cmd *cobra.Command, fileHandler file.Handle
} }
c.log.Debugf("Flag 'output' set to %t", output) c.log.Debugf("Flag 'output' set to %t", output)
var idFile clusterid.File // Get empty values from state file
if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil && !errors.Is(err, afero.ErrFileNotFound) { stateFile, err := state.ReadFromFile(fileHandler, constants.StateFilename)
return verifyFlags{}, fmt.Errorf("reading cluster ID file: %w", err) isFileNotFound := errors.Is(err, afero.ErrFileNotFound)
if isFileNotFound {
c.log.Debugf("State file %q not found, using empty state", pf.PrefixPrintablePath(constants.StateFilename))
stateFile = state.New() // error compat
} else if err != nil {
return verifyFlags{}, fmt.Errorf("reading state file: %w", err)
} }
// Get empty values from ID file
emptyEndpoint := endpoint == "" emptyEndpoint := endpoint == ""
emptyIDs := ownerID == "" && clusterID == "" emptyIDs := ownerID == "" && clusterID == ""
if emptyEndpoint || emptyIDs { if emptyEndpoint || emptyIDs {
c.log.Debugf("Trying to supplement empty flag values from %q", pf.PrefixPrintablePath(constants.ClusterIDsFilename)) c.log.Debugf("Trying to supplement empty flag values from %q", pf.PrefixPrintablePath(constants.StateFilename))
if emptyEndpoint { if emptyEndpoint {
cmd.PrintErrf("Using endpoint from %q. Specify --node-endpoint to override this.\n", pf.PrefixPrintablePath(constants.ClusterIDsFilename)) cmd.PrintErrf("Using endpoint from %q. Specify --node-endpoint to override this.\n", pf.PrefixPrintablePath(constants.StateFilename))
endpoint = idFile.IP endpoint = stateFile.Infrastructure.ClusterEndpoint
} }
if emptyIDs { if emptyIDs {
cmd.PrintErrf("Using ID from %q. Specify --cluster-id to override this.\n", pf.PrefixPrintablePath(constants.ClusterIDsFilename)) cmd.PrintErrf("Using ID from %q. Specify --cluster-id to override this.\n", pf.PrefixPrintablePath(constants.StateFilename))
ownerID = idFile.OwnerID ownerID = stateFile.ClusterValues.OwnerID
clusterID = idFile.ClusterID clusterID = stateFile.ClusterValues.ClusterID
} }
} }
var attestationURL string
if stateFile.Infrastructure.Azure != nil {
attestationURL = stateFile.Infrastructure.Azure.AttestationURL
}
// Validate // Validate
if ownerID == "" && clusterID == "" { if ownerID == "" && clusterID == "" {
return verifyFlags{}, errors.New("cluster-id not provided to verify the cluster") return verifyFlags{}, errors.New("cluster-id not provided to verify the cluster")
@ -239,8 +248,8 @@ func (c *verifyCmd) parseVerifyFlags(cmd *cobra.Command, fileHandler file.Handle
pf: pf, pf: pf,
ownerID: ownerID, ownerID: ownerID,
clusterID: clusterID, clusterID: clusterID,
maaURL: idFile.AttestationURL,
output: output, output: output,
maaURL: attestationURL,
force: force, force: force,
}, nil }, nil
} }

View File

@ -17,7 +17,7 @@ import (
"strings" "strings"
"testing" "testing"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid" "github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/atls" "github.com/edgelesssys/constellation/v2/internal/atls"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements" "github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/attestation/variant" "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
@ -48,7 +48,7 @@ func TestVerify(t *testing.T) {
formatter *stubAttDocFormatter formatter *stubAttDocFormatter
nodeEndpointFlag string nodeEndpointFlag string
clusterIDFlag string clusterIDFlag string
idFile *clusterid.File stateFile *state.State
wantEndpoint string wantEndpoint string
skipConfigCreation bool skipConfigCreation bool
wantErr bool wantErr bool
@ -84,11 +84,11 @@ func TestVerify(t *testing.T) {
formatter: &stubAttDocFormatter{}, formatter: &stubAttDocFormatter{},
wantErr: true, wantErr: true,
}, },
"endpoint from id file": { "endpoint from state file": {
provider: cloudprovider.GCP, provider: cloudprovider.GCP,
clusterIDFlag: zeroBase64, clusterIDFlag: zeroBase64,
protoClient: &stubVerifyClient{}, protoClient: &stubVerifyClient{},
idFile: &clusterid.File{IP: "192.0.2.1"}, stateFile: &state.State{Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
wantEndpoint: "192.0.2.1:" + strconv.Itoa(constants.VerifyServiceNodePortGRPC), wantEndpoint: "192.0.2.1:" + strconv.Itoa(constants.VerifyServiceNodePortGRPC),
formatter: &stubAttDocFormatter{}, formatter: &stubAttDocFormatter{},
}, },
@ -97,7 +97,7 @@ func TestVerify(t *testing.T) {
nodeEndpointFlag: "192.0.2.2:1234", nodeEndpointFlag: "192.0.2.2:1234",
clusterIDFlag: zeroBase64, clusterIDFlag: zeroBase64,
protoClient: &stubVerifyClient{}, protoClient: &stubVerifyClient{},
idFile: &clusterid.File{IP: "192.0.2.1"}, stateFile: &state.State{Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
wantEndpoint: "192.0.2.2:1234", wantEndpoint: "192.0.2.2:1234",
formatter: &stubAttDocFormatter{}, formatter: &stubAttDocFormatter{},
}, },
@ -115,11 +115,11 @@ func TestVerify(t *testing.T) {
formatter: &stubAttDocFormatter{}, formatter: &stubAttDocFormatter{},
wantErr: true, wantErr: true,
}, },
"use owner id from id file": { "use owner id from state file": {
provider: cloudprovider.GCP, provider: cloudprovider.GCP,
nodeEndpointFlag: "192.0.2.1:1234", nodeEndpointFlag: "192.0.2.1:1234",
protoClient: &stubVerifyClient{}, protoClient: &stubVerifyClient{},
idFile: &clusterid.File{OwnerID: zeroBase64}, stateFile: &state.State{ClusterValues: state.ClusterValues{OwnerID: zeroBase64}},
wantEndpoint: "192.0.2.1:1234", wantEndpoint: "192.0.2.1:1234",
formatter: &stubAttDocFormatter{}, formatter: &stubAttDocFormatter{},
}, },
@ -180,8 +180,8 @@ func TestVerify(t *testing.T) {
cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), tc.provider) cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), tc.provider)
require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, cfg)) require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, cfg))
} }
if tc.idFile != nil { if tc.stateFile != nil {
require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, tc.idFile, file.OptNone)) require.NoError(tc.stateFile.WriteToFile(fileHandler, constants.StateFilename))
} }
v := &verifyCmd{log: logger.NewTest(t)} v := &verifyCmd{log: logger.NewTest(t)}

View File

@ -416,7 +416,6 @@ go_library(
importpath = "github.com/edgelesssys/constellation/v2/cli/internal/helm", importpath = "github.com/edgelesssys/constellation/v2/cli/internal/helm",
visibility = ["//cli:__subpackages__"], visibility = ["//cli:__subpackages__"],
deps = [ deps = [
"//cli/internal/clusterid",
"//cli/internal/helm/imageversion", "//cli/internal/helm/imageversion",
"//cli/internal/state", "//cli/internal/state",
"//internal/cloud/azureshared", "//internal/cloud/azureshared",
@ -458,7 +457,6 @@ go_test(
data = glob(["testdata/**"]), data = glob(["testdata/**"]),
embed = [":helm"], embed = [":helm"],
deps = [ deps = [
"//cli/internal/clusterid",
"//cli/internal/state", "//cli/internal/state",
"//internal/attestation/measurements", "//internal/attestation/measurements",
"//internal/cloud/azureshared", "//internal/cloud/azureshared",

View File

@ -32,7 +32,6 @@ import (
"context" "context"
"fmt" "fmt"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/state" "github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/config" "github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/constants"
@ -87,10 +86,10 @@ type Options struct {
// PrepareApply loads the charts and returns the executor to apply them. // PrepareApply loads the charts and returns the executor to apply them.
// TODO(elchead): remove validK8sVersion by putting ValidK8sVersion into config.Config, see AB#3374. // TODO(elchead): remove validK8sVersion by putting ValidK8sVersion into config.Config, see AB#3374.
func (h Client) PrepareApply( func (h Client) PrepareApply(
conf *config.Config, idFile clusterid.File, conf *config.Config, stateFile *state.State,
flags Options, infra state.Infrastructure, serviceAccURI string, masterSecret uri.MasterSecret, flags Options, serviceAccURI string, masterSecret uri.MasterSecret,
) (Applier, bool, error) { ) (Applier, bool, error) {
releases, err := h.loadReleases(conf, masterSecret, idFile, flags, infra, serviceAccURI) releases, err := h.loadReleases(conf, masterSecret, stateFile, flags, serviceAccURI)
if err != nil { if err != nil {
return nil, false, fmt.Errorf("loading Helm releases: %w", err) return nil, false, fmt.Errorf("loading Helm releases: %w", err)
} }
@ -101,12 +100,11 @@ func (h Client) PrepareApply(
func (h Client) loadReleases( func (h Client) loadReleases(
conf *config.Config, secret uri.MasterSecret, conf *config.Config, secret uri.MasterSecret,
idFile clusterid.File, flags Options, infra state.Infrastructure, serviceAccURI string, stateFile *state.State, flags Options, serviceAccURI string,
) ([]Release, error) { ) ([]Release, error) {
helmLoader := newLoader(conf, idFile, h.cliVersion) helmLoader := newLoader(conf, stateFile, h.cliVersion)
h.log.Debugf("Created new Helm loader") h.log.Debugf("Created new Helm loader")
return helmLoader.loadReleases(flags.Conformance, flags.HelmWaitMode, secret, return helmLoader.loadReleases(flags.Conformance, flags.HelmWaitMode, secret, serviceAccURI)
serviceAccURI, infra)
} }
// Applier runs the Helm actions. // Applier runs the Helm actions.

View File

@ -10,7 +10,6 @@ import (
"errors" "errors"
"testing" "testing"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/state" "github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/compatibility" "github.com/edgelesssys/constellation/v2/internal/compatibility"
@ -208,8 +207,10 @@ func TestHelmApply(t *testing.T) {
options.AllowDestructive = tc.allowDestructive options.AllowDestructive = tc.allowDestructive
ex, includesUpgrade, err := sut.PrepareApply(cfg, ex, includesUpgrade, err := sut.PrepareApply(cfg,
clusterid.File{UID: "testuid", MeasurementSalt: []byte("measurementSalt")}, options, state.New().
fakeInfraOutput(csp), fakeServiceAccURI(csp), SetInfrastructure(state.Infrastructure{UID: "testuid"}).
SetClusterValues(state.ClusterValues{MeasurementSalt: []byte{0x41}}),
options, fakeServiceAccURI(csp),
uri.MasterSecret{Key: []byte("secret"), Salt: []byte("masterSalt")}) uri.MasterSecret{Key: []byte("secret"), Salt: []byte("masterSalt")})
var upgradeErr *compatibility.InvalidUpgradeError var upgradeErr *compatibility.InvalidUpgradeError
if tc.expectError { if tc.expectError {
@ -225,17 +226,6 @@ func TestHelmApply(t *testing.T) {
} }
} }
func fakeInfraOutput(csp cloudprovider.Provider) state.Infrastructure {
switch csp {
case cloudprovider.AWS:
return state.Infrastructure{}
case cloudprovider.GCP:
return state.Infrastructure{GCP: &state.GCP{}}
default:
panic("invalid csp")
}
}
func getActionReleaseNames(actions []applyAction) []string { func getActionReleaseNames(actions []applyAction) []string {
releaseActionNames := []string{} releaseActionNames := []string{}
for _, action := range actions { for _, action := range actions {

View File

@ -19,7 +19,6 @@ import (
"helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chart/loader" "helm.sh/helm/v3/pkg/chart/loader"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/helm/imageversion" "github.com/edgelesssys/constellation/v2/cli/internal/helm/imageversion"
"github.com/edgelesssys/constellation/v2/cli/internal/state" "github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
@ -72,12 +71,12 @@ type chartLoader struct {
constellationOperatorImage string constellationOperatorImage string
nodeMaintenanceOperatorImage string nodeMaintenanceOperatorImage string
clusterName string clusterName string
idFile clusterid.File stateFile *state.State
cliVersion semver.Semver cliVersion semver.Semver
} }
// newLoader creates a new ChartLoader. // newLoader creates a new ChartLoader.
func newLoader(config *config.Config, idFile clusterid.File, cliVersion semver.Semver) *chartLoader { func newLoader(config *config.Config, stateFile *state.State, cliVersion semver.Semver) *chartLoader {
// TODO(malt3): Allow overriding container image registry + prefix for all images // TODO(malt3): Allow overriding container image registry + prefix for all images
// (e.g. for air-gapped environments). // (e.g. for air-gapped environments).
var ccmImage, cnmImage string var ccmImage, cnmImage string
@ -97,7 +96,7 @@ func newLoader(config *config.Config, idFile clusterid.File, cliVersion semver.S
return &chartLoader{ return &chartLoader{
cliVersion: cliVersion, cliVersion: cliVersion,
csp: csp, csp: csp,
idFile: idFile, stateFile: stateFile,
ccmImage: ccmImage, ccmImage: ccmImage,
azureCNMImage: cnmImage, azureCNMImage: cnmImage,
config: config, config: config,
@ -120,13 +119,13 @@ type releaseApplyOrder []Release
// loadReleases loads the embedded helm charts and returns them as a HelmReleases object. // loadReleases loads the embedded helm charts and returns them as a HelmReleases object.
func (i *chartLoader) loadReleases(conformanceMode bool, helmWaitMode WaitMode, masterSecret uri.MasterSecret, func (i *chartLoader) loadReleases(conformanceMode bool, helmWaitMode WaitMode, masterSecret uri.MasterSecret,
serviceAccURI string, infra state.Infrastructure, serviceAccURI string,
) (releaseApplyOrder, error) { ) (releaseApplyOrder, error) {
ciliumRelease, err := i.loadRelease(ciliumInfo, helmWaitMode) ciliumRelease, err := i.loadRelease(ciliumInfo, helmWaitMode)
if err != nil { if err != nil {
return nil, fmt.Errorf("loading cilium: %w", err) return nil, fmt.Errorf("loading cilium: %w", err)
} }
ciliumVals := extraCiliumValues(i.config.GetProvider(), conformanceMode, infra) ciliumVals := extraCiliumValues(i.config.GetProvider(), conformanceMode, i.stateFile.Infrastructure)
ciliumRelease.Values = mergeMaps(ciliumRelease.Values, ciliumVals) ciliumRelease.Values = mergeMaps(ciliumRelease.Values, ciliumVals)
certManagerRelease, err := i.loadRelease(certManagerInfo, helmWaitMode) certManagerRelease, err := i.loadRelease(certManagerInfo, helmWaitMode)
@ -138,14 +137,14 @@ func (i *chartLoader) loadReleases(conformanceMode bool, helmWaitMode WaitMode,
if err != nil { if err != nil {
return nil, fmt.Errorf("loading operators: %w", err) return nil, fmt.Errorf("loading operators: %w", err)
} }
operatorRelease.Values = mergeMaps(operatorRelease.Values, extraOperatorValues(i.idFile.UID)) operatorRelease.Values = mergeMaps(operatorRelease.Values, extraOperatorValues(i.stateFile.Infrastructure.UID))
conServicesRelease, err := i.loadRelease(constellationServicesInfo, helmWaitMode) conServicesRelease, err := i.loadRelease(constellationServicesInfo, helmWaitMode)
if err != nil { if err != nil {
return nil, fmt.Errorf("loading constellation-services: %w", err) return nil, fmt.Errorf("loading constellation-services: %w", err)
} }
svcVals, err := extraConstellationServicesValues(i.config, masterSecret, i.idFile.UID, serviceAccURI, infra) svcVals, err := extraConstellationServicesValues(i.config, masterSecret, serviceAccURI, i.stateFile.Infrastructure)
if err != nil { if err != nil {
return nil, fmt.Errorf("extending constellation-services values: %w", err) return nil, fmt.Errorf("extending constellation-services values: %w", err)
} }
@ -216,7 +215,7 @@ func (i *chartLoader) loadRelease(info chartInfo, helmWaitMode WaitMode) (Releas
func (i *chartLoader) loadAWSLBControllerValues() map[string]any { func (i *chartLoader) loadAWSLBControllerValues() map[string]any {
return map[string]any{ return map[string]any{
"clusterName": clusterid.GetClusterName(i.config, i.idFile), "clusterName": i.stateFile.Infrastructure.Name,
"tolerations": controlPlaneTolerations, "tolerations": controlPlaneTolerations,
"nodeSelector": controlPlaneNodeSelector, "nodeSelector": controlPlaneNodeSelector,
} }

View File

@ -22,7 +22,6 @@ import (
"helm.sh/helm/v3/pkg/chartutil" "helm.sh/helm/v3/pkg/chartutil"
"helm.sh/helm/v3/pkg/engine" "helm.sh/helm/v3/pkg/engine"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/state" "github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements" "github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/cloud/azureshared" "github.com/edgelesssys/constellation/v2/internal/cloud/azureshared"
@ -66,12 +65,23 @@ func TestLoadReleases(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
require := require.New(t) require := require.New(t)
config := &config.Config{Provider: config.ProviderConfig{GCP: &config.GCPConfig{}}} config := &config.Config{Provider: config.ProviderConfig{GCP: &config.GCPConfig{}}}
chartLoader := newLoader(config, clusterid.File{UID: "testuid", MeasurementSalt: []byte("measurementSalt")}, chartLoader := newLoader(
semver.NewFromInt(2, 10, 0, "")) config,
state.New().
SetInfrastructure(state.Infrastructure{
GCP: &state.GCP{
ProjectID: "test-project-id",
IPCidrNode: "test-node-cidr",
IPCidrPod: "test-pod-cidr",
},
}).
SetClusterValues(state.ClusterValues{MeasurementSalt: []byte{0x41}}),
semver.NewFromInt(2, 10, 0, ""),
)
helmReleases, err := chartLoader.loadReleases( helmReleases, err := chartLoader.loadReleases(
true, WaitModeAtomic, true, WaitModeAtomic,
uri.MasterSecret{Key: []byte("secret"), Salt: []byte("masterSalt")}, uri.MasterSecret{Key: []byte("secret"), Salt: []byte("masterSalt")},
fakeServiceAccURI(cloudprovider.GCP), state.Infrastructure{GCP: &state.GCP{}}, fakeServiceAccURI(cloudprovider.GCP),
) )
require.NoError(err) require.NoError(err)
for _, release := range helmReleases { for _, release := range helmReleases {
@ -85,7 +95,7 @@ func TestLoadAWSLoadBalancerValues(t *testing.T) {
sut := chartLoader{ sut := chartLoader{
config: &config.Config{Name: "testCluster"}, config: &config.Config{Name: "testCluster"},
clusterName: "testCluster", clusterName: "testCluster",
idFile: clusterid.File{UID: "testuid"}, stateFile: state.New().SetInfrastructure(state.Infrastructure{UID: "testuid", Name: "testCluster-testuid"}),
} }
val := sut.loadAWSLBControllerValues() val := sut.loadAWSLBControllerValues()
assert.Equal(t, "testCluster-testuid", val["clusterName"]) assert.Equal(t, "testCluster-testuid", val["clusterName"])
@ -174,8 +184,8 @@ func TestConstellationServices(t *testing.T) {
tc.config, uri.MasterSecret{ tc.config, uri.MasterSecret{
Key: []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), Key: []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
Salt: []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), Salt: []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
}, }, serviceAccURI, state.Infrastructure{
"uid", serviceAccURI, state.Infrastructure{ UID: "uid",
Azure: &state.Azure{}, Azure: &state.Azure{},
GCP: &state.GCP{}, GCP: &state.GCP{},
}) })

View File

@ -54,7 +54,7 @@ func extraCiliumValues(provider cloudprovider.Provider, conformanceMode bool, ou
// extraConstellationServicesValues extends the given values map by some values depending on user input. // extraConstellationServicesValues extends the given values map by some values depending on user input.
// Values set inside this function are only applied during init, not during upgrade. // Values set inside this function are only applied during init, not during upgrade.
func extraConstellationServicesValues( func extraConstellationServicesValues(
cfg *config.Config, masterSecret uri.MasterSecret, uid, serviceAccURI string, output state.Infrastructure, cfg *config.Config, masterSecret uri.MasterSecret, serviceAccURI string, output state.Infrastructure,
) (map[string]any, error) { ) (map[string]any, error) {
extraVals := map[string]any{} extraVals := map[string]any{}
extraVals["join-service"] = map[string]any{ extraVals["join-service"] = map[string]any{
@ -102,7 +102,7 @@ func extraConstellationServicesValues(
extraVals["ccm"] = map[string]any{ extraVals["ccm"] = map[string]any{
"GCP": map[string]any{ "GCP": map[string]any{
"projectID": output.GCP.ProjectID, "projectID": output.GCP.ProjectID,
"uid": uid, "uid": output.UID,
"secretData": string(rawKey), "secretData": string(rawKey),
"subnetworkPodCIDR": output.GCP.IPCidrPod, "subnetworkPodCIDR": output.GCP.IPCidrPod,
}, },

View File

@ -1,8 +1,30 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library") load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("//bazel/go:go_test.bzl", "go_test")
go_library( go_library(
name = "state", name = "state",
srcs = ["state.go"], srcs = ["state.go"],
importpath = "github.com/edgelesssys/constellation/v2/cli/internal/state", importpath = "github.com/edgelesssys/constellation/v2/cli/internal/state",
visibility = ["//cli:__subpackages__"], visibility = ["//cli:__subpackages__"],
deps = [
"//cli/internal/clusterid",
"//internal/config",
"//internal/file",
"@cat_dario_mergo//:mergo",
],
)
go_test(
name = "state_test",
srcs = ["state_test.go"],
embed = [":state"],
deps = [
"//cli/internal/clusterid",
"//internal/config",
"//internal/constants",
"//internal/file",
"@com_github_spf13_afero//:afero",
"@com_github_stretchr_testify//assert",
"@in_gopkg_yaml_v3//:yaml_v3",
],
) )

View File

@ -7,33 +7,117 @@ SPDX-License-Identifier: AGPL-3.0-only
// package state defines the structure of the Constellation state file. // package state defines the structure of the Constellation state file.
package state package state
import (
"fmt"
"dario.cat/mergo"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/file"
)
const ( const (
// Version1 is the first version of the state file. // Version1 is the first version of the state file.
Version1 = "v1" Version1 = "v1"
) )
// ReadFromFile reads the state file at the given path and returns the state.
func ReadFromFile(fileHandler file.Handler, path string) (*State, error) {
state := &State{}
if err := fileHandler.ReadYAML(path, &state); err != nil {
return nil, fmt.Errorf("reading state file: %w", err)
}
return state, nil
}
// State describe the entire state to describe a Constellation cluster. // State describe the entire state to describe a Constellation cluster.
type State struct { type State struct {
Version string `yaml:"version"` Version string `yaml:"version"`
Infrastructure Infrastructure `yaml:"infrastructure"` Infrastructure Infrastructure `yaml:"infrastructure"`
ClusterValues ClusterValues `yaml:"clusterValues"`
} }
// NewState creates a new state with the given infrastructure. // New creates a new cluster state (file).
func NewState(Infrastructure Infrastructure) State { func New() *State {
return State{ return &State{
Version: Version1, Version: Version1,
Infrastructure: Infrastructure,
} }
} }
// NewFromIDFile creates a new cluster state file from the given ID file and config.
func NewFromIDFile(idFile clusterid.File, cfg *config.Config) *State {
s := New().
SetClusterValues(ClusterValues{
OwnerID: idFile.OwnerID,
ClusterID: idFile.ClusterID,
MeasurementSalt: idFile.MeasurementSalt,
}).
SetInfrastructure(Infrastructure{
UID: idFile.UID,
ClusterEndpoint: idFile.IP,
APIServerCertSANs: idFile.APIServerCertSANs,
InitSecret: idFile.InitSecret,
Name: clusterid.GetClusterName(cfg, idFile),
})
if idFile.AttestationURL != "" {
s.Infrastructure.Azure = &Azure{
AttestationURL: idFile.AttestationURL,
}
}
return s
}
// SetInfrastructure sets the infrastructure state.
func (s *State) SetInfrastructure(infrastructure Infrastructure) *State {
s.Infrastructure = infrastructure
return s
}
// SetClusterValues sets the cluster values.
func (s *State) SetClusterValues(clusterValues ClusterValues) *State {
s.ClusterValues = clusterValues
return s
}
// WriteToFile writes the state to the given path, overwriting any existing file.
func (s *State) WriteToFile(fileHandler file.Handler, path string) error {
if err := fileHandler.WriteYAML(path, s, file.OptMkdirAll, file.OptOverwrite); err != nil {
return fmt.Errorf("writing state file: %w", err)
}
return nil
}
// Merge merges the state information from other into the current state.
// If a field is set in both states, the value of the other state is used.
func (s *State) Merge(other *State) (*State, error) {
if err := mergo.Merge(s, other, mergo.WithOverride); err != nil {
return nil, fmt.Errorf("merging state file: %w", err)
}
return s, nil
}
// ClusterValues describe the (Kubernetes) cluster state, set during initialization of the cluster.
type ClusterValues struct {
// ClusterID is the unique identifier of the cluster.
ClusterID string `yaml:"clusterID"`
// OwnerID is the unique identifier of the owner of the cluster.
OwnerID string `yaml:"ownerID"`
// MeasurementSalt is the salt generated during cluster init.
MeasurementSalt []byte `yaml:"measurementSalt"`
}
// Infrastructure describe the state related to the cloud resources of the cluster. // Infrastructure describe the state related to the cloud resources of the cluster.
type Infrastructure struct { type Infrastructure struct {
UID string `yaml:"uid"` UID string `yaml:"uid"`
ClusterEndpoint string `yaml:"clusterEndpoint"` ClusterEndpoint string `yaml:"clusterEndpoint"`
InitSecret string `yaml:"initSecret"` InitSecret []byte `yaml:"initSecret"`
APIServerCertSANs []string `yaml:"apiServerCertSANs"` APIServerCertSANs []string `yaml:"apiServerCertSANs"`
Azure *Azure `yaml:"azure,omitempty"` // Name is the name of the cluster.
GCP *GCP `yaml:"gcp,omitempty"` Name string `yaml:"name"`
Azure *Azure `yaml:"azure,omitempty"`
GCP *GCP `yaml:"gcp,omitempty"`
} }
// GCP describes the infra state related to GCP. // GCP describes the infra state related to GCP.

View File

@ -0,0 +1,392 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package state
import (
"testing"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
)
var defaultState = &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "123",
ClusterEndpoint: "test-cluster-endpoint",
InitSecret: []byte{0x41},
APIServerCertSANs: []string{
"api-server-cert-san-test",
"api-server-cert-san-test-2",
},
Azure: &Azure{
ResourceGroup: "test-rg",
SubscriptionID: "test-sub",
NetworkSecurityGroupName: "test-nsg",
LoadBalancerName: "test-lb",
UserAssignedIdentity: "test-uami",
AttestationURL: "test-maaUrl",
},
GCP: &GCP{
ProjectID: "test-project",
IPCidrNode: "test-cidr-node",
IPCidrPod: "test-cidr-pod",
},
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
OwnerID: "test-owner-id",
MeasurementSalt: []byte{0x41},
},
}
func TestWriteToFile(t *testing.T) {
prepareFs := func(existingFiles ...string) file.Handler {
fs := afero.NewMemMapFs()
fh := file.NewHandler(fs)
for _, name := range existingFiles {
if err := fh.Write(name, []byte{0x41}); err != nil {
t.Fatalf("failed to create file %s: %v", name, err)
}
}
return fh
}
testCases := map[string]struct {
state *State
fh file.Handler
wantErr bool
}{
"success": {
state: defaultState,
fh: prepareFs(),
},
"overwrite": {
state: defaultState,
fh: prepareFs(constants.StateFilename),
},
"empty state": {
state: &State{},
fh: prepareFs(),
},
"rofs": {
state: defaultState,
fh: file.NewHandler(afero.NewReadOnlyFs(afero.NewMemMapFs())),
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
err := tc.state.WriteToFile(tc.fh, constants.StateFilename)
if tc.wantErr {
assert.Error(err)
} else {
assert.NoError(err)
assert.Equal(mustMarshalYaml(t, tc.state), mustReadFromFile(t, tc.fh))
}
})
}
}
func TestReadFromFile(t *testing.T) {
prepareFs := func(existingFiles map[string][]byte) file.Handler {
fs := afero.NewMemMapFs()
fh := file.NewHandler(fs)
for name, content := range existingFiles {
if err := fh.Write(name, content); err != nil {
t.Fatalf("failed to create file %s: %v", name, err)
}
}
return fh
}
testCases := map[string]struct {
existingFiles map[string][]byte
wantErr bool
}{
"success": {
existingFiles: map[string][]byte{
constants.StateFilename: mustMarshalYaml(t, defaultState),
},
},
"no state file present": {
existingFiles: map[string][]byte{},
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
fh := prepareFs(tc.existingFiles)
state, err := ReadFromFile(fh, constants.StateFilename)
if tc.wantErr {
assert.Error(err)
} else {
assert.NoError(err)
assert.Equal(tc.existingFiles[constants.StateFilename], mustMarshalYaml(t, state))
}
})
}
}
func mustMarshalYaml(t *testing.T, v any) []byte {
t.Helper()
b, err := yaml.Marshal(v)
if err != nil {
t.Fatalf("failed to marshal yaml: %v", err)
}
return b
}
func mustReadFromFile(t *testing.T, fh file.Handler) []byte {
t.Helper()
b, err := fh.Read(constants.StateFilename)
if err != nil {
t.Fatalf("failed to read file: %v", err)
}
return b
}
func TestMerge(t *testing.T) {
testCases := map[string]struct {
state *State
other *State
expected *State
wantErr bool
}{
"success": {
state: &State{
Infrastructure: Infrastructure{
ClusterEndpoint: "test-cluster-endpoint",
UID: "123",
},
},
other: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
expected: &State{
Version: "v1",
Infrastructure: Infrastructure{
ClusterEndpoint: "test-cluster-endpoint",
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
},
"empty state": {
state: &State{},
other: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
expected: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
},
"empty other": {
state: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
other: &State{},
expected: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
},
"empty state and other": {
state: &State{},
other: &State{},
expected: &State{},
},
"identical": {
state: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
other: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
expected: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
},
"nested pointer": {
state: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "123",
Azure: &Azure{
AttestationURL: "test-maaUrl",
},
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
other: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
Azure: &Azure{
AttestationURL: "test-maaUrl-2",
},
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
expected: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
Azure: &Azure{
AttestationURL: "test-maaUrl-2",
},
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
_, err := tc.state.Merge(tc.other)
if tc.wantErr {
assert.Error(err)
} else {
assert.NoError(err)
assert.Equal(tc.expected, tc.state)
}
})
}
}
func TestNewFromIDFile(t *testing.T) {
testCases := map[string]struct {
idFile clusterid.File
cfg *config.Config
expected *State
}{
"success": {
idFile: clusterid.File{
ClusterID: "test-cluster-id",
UID: "test-uid",
},
cfg: config.Default(),
expected: &State{
Version: Version1,
Infrastructure: Infrastructure{
UID: "test-uid",
Name: "constell-test-uid",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
},
"empty id file": {
idFile: clusterid.File{},
cfg: config.Default(),
expected: &State{Version: Version1, Infrastructure: Infrastructure{Name: "constell-"}},
},
"nested pointer": {
idFile: clusterid.File{
ClusterID: "test-cluster-id",
UID: "test-uid",
AttestationURL: "test-maaUrl",
},
cfg: config.Default(),
expected: &State{
Version: Version1,
Infrastructure: Infrastructure{
UID: "test-uid",
Azure: &Azure{
AttestationURL: "test-maaUrl",
},
Name: "constell-test-uid",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
state := NewFromIDFile(tc.idFile, tc.cfg)
assert.Equal(tc.expected, state)
})
}
}

View File

@ -221,11 +221,21 @@ func (c *Client) ShowInfrastructure(ctx context.Context, provider cloudprovider.
return state.Infrastructure{}, errors.New("invalid type in uid output: not a string") return state.Infrastructure{}, errors.New("invalid type in uid output: not a string")
} }
nameOutput, ok := tfState.Values.Outputs["name"]
if !ok {
return state.Infrastructure{}, errors.New("no name output found")
}
name, ok := nameOutput.Value.(string)
if !ok {
return state.Infrastructure{}, errors.New("invalid type in name output: not a string")
}
res := state.Infrastructure{ res := state.Infrastructure{
ClusterEndpoint: ip, ClusterEndpoint: ip,
APIServerCertSANs: apiServerCertSANs, APIServerCertSANs: apiServerCertSANs,
InitSecret: secret, InitSecret: []byte(secret),
UID: uid, UID: uid,
Name: name,
} }
switch provider { switch provider {

View File

@ -14,3 +14,7 @@ output "initSecret" {
value = random_password.initSecret.result value = random_password.initSecret.result
sensitive = true sensitive = true
} }
# Expose the module-local "name" value in the Terraform state so it can be
# read back as an output (e.g. by tooling consuming `terraform output name`).
output "name" {
value = local.name
}

View File

@ -39,3 +39,7 @@ output "resource_group" {
output "subscription_id" { output "subscription_id" {
value = data.azurerm_subscription.current.subscription_id value = data.azurerm_subscription.current.subscription_id
} }
# Expose the module-local "name" value in the Terraform state so it can be
# read back as an output (e.g. by tooling consuming `terraform output name`).
output "name" {
value = local.name
}

View File

@ -30,3 +30,7 @@ output "ip_cidr_nodes" {
output "ip_cidr_pods" { output "ip_cidr_pods" {
value = local.cidr_vpc_subnet_pods value = local.cidr_vpc_subnet_pods
} }
# Expose the module-local "name" value in the Terraform state so it can be
# read back as an output (e.g. by tooling consuming `terraform output name`).
output "name" {
value = local.name
}

View File

@ -14,3 +14,7 @@ output "initSecret" {
value = random_password.initSecret.result value = random_password.initSecret.result
sensitive = true sensitive = true
} }
# Expose the module-local "name" value in the Terraform state so it can be
# read back as an output (e.g. by tooling consuming `terraform output name`).
output "name" {
value = local.name
}

View File

@ -38,3 +38,7 @@ output "validate_constellation_cmdline" {
error_message = "constellation_cmdline must be set if constellation_boot_mode is 'direct-linux-boot'" error_message = "constellation_cmdline must be set if constellation_boot_mode is 'direct-linux-boot'"
} }
} }
# NOTE(review): value is a synthetic placeholder built from var.name (per the
# inline comment), mirroring how the "uid" output is handled for QEMU.
output "name" {
value = "${var.name}-qemu" // placeholder, as per "uid" output
}

View File

@ -223,6 +223,9 @@ func TestCreateCluster(t *testing.T) {
"api_server_cert_sans": { "api_server_cert_sans": {
Value: []any{"192.0.2.100"}, Value: []any{"192.0.2.100"},
}, },
"name": {
Value: "constell-12345abc",
},
}, },
}, },
} }
@ -262,6 +265,9 @@ func TestCreateCluster(t *testing.T) {
"loadbalancer_name": { "loadbalancer_name": {
Value: "test_lb_name", Value: "test_lb_name",
}, },
"name": {
Value: "constell-12345abc",
},
}, },
}, },
} }
@ -398,6 +404,20 @@ func TestCreateCluster(t *testing.T) {
fs: afero.NewMemMapFs(), fs: afero.NewMemMapFs(),
wantErr: true, wantErr: true,
}, },
"name has wrong type": {
pathBase: "terraform",
provider: cloudprovider.QEMU,
vars: qemuVars,
tf: &stubTerraform{
showState: &tfjson.State{
Values: &tfjson.StateValues{
Outputs: map[string]*tfjson.StateOutput{"name": {Value: 42}},
},
},
},
fs: afero.NewMemMapFs(),
wantErr: true,
},
"working attestation url": { "working attestation url": {
pathBase: "terraform", pathBase: "terraform",
provider: cloudprovider.Azure, provider: cloudprovider.Azure,
@ -457,7 +477,7 @@ func TestCreateCluster(t *testing.T) {
} }
assert.NoError(err) assert.NoError(err)
assert.Equal("192.0.2.100", infraState.ClusterEndpoint) assert.Equal("192.0.2.100", infraState.ClusterEndpoint)
assert.Equal("initSecret", infraState.InitSecret) assert.Equal([]byte("initSecret"), infraState.InitSecret)
assert.Equal("12345abc", infraState.UID) assert.Equal("12345abc", infraState.UID)
if tc.provider == cloudprovider.Azure { if tc.provider == cloudprovider.Azure {
assert.Equal(tc.expectedAttestationURL, infraState.Azure.AttestationURL) assert.Equal(tc.expectedAttestationURL, infraState.Azure.AttestationURL)

View File

@ -113,11 +113,11 @@ func deploy(cmd *cobra.Command, fileHandler file.Handler, constellationConfig *c
return err return err
} }
if len(ips) == 0 { if len(ips) == 0 {
var idFile clusterIDsFile var stateFile clusterStateFile
if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil { if err := fileHandler.ReadYAML(constants.StateFilename, &stateFile); err != nil {
return fmt.Errorf("reading cluster IDs file: %w", err) return fmt.Errorf("reading cluster state file: %w", err)
} }
ips = []string{idFile.IP} ips = []string{stateFile.Infrastructure.ClusterEndpoint}
} }
info, err := cmd.Flags().GetStringToString("info") info, err := cmd.Flags().GetStringToString("info")
@ -285,8 +285,8 @@ type fileTransferer interface {
SetFiles(files []filetransfer.FileStat) SetFiles(files []filetransfer.FileStat)
} }
type clusterIDsFile struct { type clusterStateFile struct {
ClusterID string Infrastructure struct {
OwnerID string ClusterEndpoint string `yaml:"clusterEndpoint"`
IP string } `yaml:"infrastructure"`
} }

View File

@ -8,7 +8,7 @@ The CLI is also used for updating your cluster.
## Workspaces ## Workspaces
Each Constellation cluster has an associated *workspace*. Each Constellation cluster has an associated *workspace*.
The workspace is where data such as the Constellation state, config, and ID files are stored. The workspace is where data such as the Constellation state and config files are stored.
Each workspace is associated with a single cluster and configuration. Each workspace is associated with a single cluster and configuration.
The CLI stores state in the local filesystem making the current directory the active workspace. The CLI stores state in the local filesystem making the current directory the active workspace.
Multiple clusters require multiple workspaces, hence, multiple directories. Multiple clusters require multiple workspaces, hence, multiple directories.
@ -21,14 +21,14 @@ To allow for fine-grained configuration of your cluster and cloud environment, C
Altogether, the following files are generated during the creation of a Constellation cluster and stored in the current workspace: Altogether, the following files are generated during the creation of a Constellation cluster and stored in the current workspace:
* a configuration file * a configuration file
* an ID file * a state file
* a Base64-encoded master secret * a Base64-encoded master secret
* [Terraform artifacts](../reference/terraform.md), stored in subdirectories * [Terraform artifacts](../reference/terraform.md), stored in subdirectories
* a Kubernetes `kubeconfig` file. * a Kubernetes `kubeconfig` file.
After the creation of your cluster, the CLI will provide you with a Kubernetes `kubeconfig` file. After the initialization of your cluster, the CLI will provide you with a Kubernetes `kubeconfig` file.
This file grants you access to your Kubernetes cluster and configures the [kubectl](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) tool. This file grants you access to your Kubernetes cluster and configures the [kubectl](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) tool.
In addition, the cluster's [identifier](orchestration.md#post-installation-configuration) is returned and stored in a file called `constellation-id.json` In addition, the cluster's [identifier](orchestration.md#post-installation-configuration) is returned and stored in the state file.
### Creation process details ### Creation process details

View File

@ -380,7 +380,7 @@ Verify the confidential properties of a Constellation cluster
### Synopsis ### Synopsis
Verify the confidential properties of a Constellation cluster. Verify the confidential properties of a Constellation cluster.
If arguments aren't specified, values are read from `constellation-id.json`. If arguments aren't specified, values are read from `constellation-state.yaml`.
``` ```
constellation verify [flags] constellation verify [flags]

View File

@ -65,14 +65,16 @@ terraform init
terraform apply terraform apply
``` ```
The Constellation [init step](#the-init-step) requires the already created `constellation-config.yaml` and the `constellation-id.json`. The Constellation [init step](#the-init-step) requires the already created `constellation-config.yaml` and the `constellation-state.yaml`.
Create the `constellation-id.json` using the output from the Terraform state and the `constellation-conf.yaml`: Create the `constellation-state.yaml` using the output from the Terraform state and the `constellation-conf.yaml`:
```bash ```bash
CONSTELL_IP=$(terraform output ip) CONSTELL_IP=$(terraform output ip)
CONSTELL_INIT_SECRET=$(terraform output initSecret | jq -r | tr -d '\n' | base64) CONSTELL_INIT_SECRET=$(terraform output initSecret | jq -r | tr -d '\n' | base64)
CONSTELL_CSP=$(cat constellation-conf.yaml | yq ".provider | keys | .[0]") touch constellation-state.yaml
jq --null-input --arg cloudprovider "$CONSTELL_CSP" --arg ip "$CONSTELL_IP" --arg initsecret "$CONSTELL_INIT_SECRET" '{"cloudprovider":$cloudprovider,"ip":$ip,"initsecret":$initsecret}' > constellation-id.json yq eval '.version ="v1"' --inplace constellation-state.yaml
yq eval ".infrastructure.initSecret = \"${CONSTELL_INIT_SECRET}\"" --inplace constellation-state.yaml
yq eval ".infrastructure.clusterEndpoint = \"${CONSTELL_IP}\"" --inplace constellation-state.yaml
``` ```
</tabItem> </tabItem>

View File

@ -125,7 +125,7 @@ This means that you have to recover the node manually.
Recovering a cluster requires the following parameters: Recovering a cluster requires the following parameters:
* The `constellation-id.json` file in your working directory or the cluster's load balancer IP address * The `constellation-state.yaml` file in your working directory or the cluster's endpoint
* The master secret of the cluster * The master secret of the cluster
A cluster can be recovered like this: A cluster can be recovered like this:

View File

@ -51,7 +51,7 @@ terraform destroy
Delete all files that are no longer needed: Delete all files that are no longer needed:
```bash ```bash
rm constellation-id.json constellation-admin.conf rm constellation-state.yaml constellation-admin.conf
``` ```
Only the `constellation-mastersecret.json` and the configuration file remain. Only the `constellation-mastersecret.json` and the configuration file remain.

View File

@ -78,7 +78,7 @@ From the attestation statement, the command verifies the following properties:
* The cluster is using the correct Confidential VM (CVM) type. * The cluster is using the correct Confidential VM (CVM) type.
* Inside the CVMs, the correct node images are running. The node images are identified through the measurements obtained in the previous step. * Inside the CVMs, the correct node images are running. The node images are identified through the measurements obtained in the previous step.
* The unique ID of the cluster matches the one from your `constellation-id.json` file or passed in via `--cluster-id`. * The unique ID of the cluster matches the one from your `constellation-state.yaml` file or passed in via `--cluster-id`.
Once the above properties are verified, you know that you are talking to the right Constellation cluster and it's in a good and trustworthy shape. Once the above properties are verified, you know that you are talking to the right Constellation cluster and it's in a good and trustworthy shape.

1
go.mod
View File

@ -45,6 +45,7 @@ require (
cloud.google.com/go/logging v1.7.0 cloud.google.com/go/logging v1.7.0
cloud.google.com/go/secretmanager v1.11.1 cloud.google.com/go/secretmanager v1.11.1
cloud.google.com/go/storage v1.31.0 cloud.google.com/go/storage v1.31.0
dario.cat/mergo v1.0.0
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0

2
go.sum
View File

@ -59,6 +59,8 @@ cloud.google.com/go/storage v1.31.0 h1:+S3LjjEN2zZ+L5hOwj4+1OkGCsLVe0NzpXKQ1pSdT
cloud.google.com/go/storage v1.31.0/go.mod h1:81ams1PrhW16L4kF7qg+4mTq7SRs5HsbDTM0bWvrwJ0= cloud.google.com/go/storage v1.31.0/go.mod h1:81ams1PrhW16L4kF7qg+4mTq7SRs5HsbDTM0bWvrwJ0=
code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c h1:5eeuG0BHx1+DHeT3AP+ISKZ2ht1UjGhm581ljqYpVeQ= code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c h1:5eeuG0BHx1+DHeT3AP+ISKZ2ht1UjGhm581ljqYpVeQ=
code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0=

View File

@ -57,6 +57,7 @@ require (
cloud.google.com/go/compute v1.20.1 // indirect cloud.google.com/go/compute v1.20.1 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect
code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c // indirect code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c // indirect
dario.cat/mergo v1.0.0 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 // indirect

View File

@ -47,6 +47,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c h1:5eeuG0BHx1+DHeT3AP+ISKZ2ht1UjGhm581ljqYpVeQ= code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c h1:5eeuG0BHx1+DHeT3AP+ISKZ2ht1UjGhm581ljqYpVeQ=
code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0=

View File

@ -232,3 +232,8 @@ func (h *Handler) CopyFile(src, dst string, opts ...Option) error {
return nil return nil
} }
// RenameFile moves the file at src to dst, replacing any file that already
// exists at the destination. It returns the error of the underlying rename.
func (h *Handler) RenameFile(src, dst string) error {
	return h.fs.Rename(src, dst)
}

View File

@ -540,3 +540,58 @@ func TestCopyDir(t *testing.T) {
}) })
} }
} }
// TestRename exercises Handler.RenameFile: plain rename, overwrite of an
// existing destination, and failure on a missing source file.
func TestRename(t *testing.T) {
	// newHandler builds an in-memory handler pre-populated with the given files.
	newHandler := func(files ...string) Handler {
		h := NewHandler(afero.NewMemMapFs())
		for _, f := range files {
			require.NoError(t, h.Write(f, []byte("some content"), OptMkdirAll))
		}
		return h
	}

	tests := map[string]struct {
		handler    Handler
		renames    map[string]string
		checkFiles []string
		wantErr    bool
	}{
		"successful rename": {
			handler:    newHandler("someFile"),
			renames:    map[string]string{"someFile": "someOtherFile"},
			checkFiles: []string{"someOtherFile"},
		},
		"rename to existing file, overwrite": {
			handler:    newHandler("someFile", "someOtherFile"),
			renames:    map[string]string{"someFile": "someOtherFile"},
			checkFiles: []string{"someOtherFile"},
		},
		"file does not exist": {
			handler: newHandler(),
			renames: map[string]string{"someFile": "someOtherFile"},
			wantErr: true,
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			for src, dst := range tt.renames {
				err := tt.handler.RenameFile(src, dst)
				if tt.wantErr {
					require.Error(t, err)
					continue
				}
				require.NoError(t, err)
			}
			// All expected destination files must exist after the renames.
			for _, f := range tt.checkFiles {
				_, err := tt.handler.fs.Stat(f)
				require.NoError(t, err)
			}
		})
	}
}

View File

@ -91,6 +91,7 @@ clusterValues:
clusterID: "00112233445566778899AABBCCDDEEFF" # cluster ID uniquely identifies this Constellation cluster. clusterID: "00112233445566778899AABBCCDDEEFF" # cluster ID uniquely identifies this Constellation cluster.
ownerID: "00112233445566778899AABBCCDDEEFF" # owner ID identifies this cluster as belonging to owner. ownerID: "00112233445566778899AABBCCDDEEFF" # owner ID identifies this cluster as belonging to owner.
measurementSalt: "c2VjcmV0Cg==" # measurement salt is used by nodes to derive their cluster ID. measurementSalt: "c2VjcmV0Cg==" # measurement salt is used by nodes to derive their cluster ID.
name: "constell-001122" # name of the cluster, as used in e.g. cluster resource naming.
``` ```
## Updates to the state file ## Updates to the state file