cli: use state file on init and upgrade (#2395)

* [wip] use state file in CLI

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

tidy

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* use state file in CLI

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

take clusterConfig from IDFile for compat

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

various fixes

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

wip

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* add GCP-specific values in Helm loader test

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* remove unnecessary pointer

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* write ClusterValues in one step

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* move stub to test file

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* remove mention of id-file

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* move output to `migrateTerraform`

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* unconditional assignments converting from idFile

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* move require block in go modules file

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* fall back to id file on upgrade

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* tidy

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* fix linter check

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* add notice to remove Terraform state check on manual migration

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* add `name` field

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

fix name tests

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* return early if no Terraform diff

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* tidy

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* return infrastructure state even if no diff exists

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* add TODO to remove comment

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* use state-file in miniconstellation

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* cli: remove id-file (#2402)

* remove id-file from `constellation create`

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* add file renaming to handler

* rename id-file after upgrade

* use idFile on `constellation init`

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* remove id-file from `constellation verify`

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* linter fixes

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* remove id-file from `constellation mini`

* remove id-file from `constellation recover`

* linter fixes

* remove id-file from `constellation terminate`

* fix initSecret type

* fix recover argument precedence

* fix terminate test

* generate

* add TODO to remove id-file removal

* Update cli/internal/cmd/init.go

Co-authored-by: Adrian Stobbe <stobbe.adrian@gmail.com>

* fix verify arg parse logic

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* add version test

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* remove id-file from docs

* add file not found log

* use state-file in miniconstellation

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* remove id-file from `constellation iam destroy`

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* remove id-file from `cdbg deploy`

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

---------

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>
Co-authored-by: Adrian Stobbe <stobbe.adrian@gmail.com>

* use state-file in CI

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>

* update orchestration docs

---------

Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>
Co-authored-by: Adrian Stobbe <stobbe.adrian@gmail.com>
Moritz Sanft 2023-10-09 13:04:29 +02:00 committed by GitHub
parent dbf40d185c
commit 005e865a13
51 changed files with 1189 additions and 497 deletions
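For orientation, a minimal sketch (not part of the commit) of the state-file flow this change introduces, using only API names that appear in the diff below (state.New, SetInfrastructure, SetClusterValues, WriteToFile, ReadFromFile, constants.StateFilename); the sketch function and the concrete field values are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/edgelesssys/constellation/v2/cli/internal/state"
	"github.com/edgelesssys/constellation/v2/internal/constants"
	"github.com/edgelesssys/constellation/v2/internal/file"
	"github.com/spf13/afero"
)

// sketch mirrors the new flow: `create` writes the infrastructure values to
// constellation-state.yaml, `init` reads them back and appends the cluster values.
func sketch() error {
	fileHandler := file.NewHandler(afero.NewMemMapFs())

	// `create`: persist the infrastructure values returned by the cloud creator.
	stateFile := state.New().SetInfrastructure(state.Infrastructure{
		ClusterEndpoint: "192.0.2.1", // illustrative value
		UID:             "uid",       // illustrative value
	})
	if err := stateFile.WriteToFile(fileHandler, constants.StateFilename); err != nil {
		return fmt.Errorf("writing state file: %w", err)
	}

	// `init`: read the state back and add the cluster values after initialization.
	stateFile, err := state.ReadFromFile(fileHandler, constants.StateFilename)
	if err != nil {
		return fmt.Errorf("reading state file: %w", err)
	}
	stateFile.SetClusterValues(state.ClusterValues{ClusterID: "cluster-id"}) // illustrative value
	return stateFile.WriteToFile(fileHandler, constants.StateFilename)
}

func main() {
	if err := sketch(); err != nil {
		fmt.Println(err)
	}
}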

@ -181,7 +181,7 @@ runs:
CSP: ${{ inputs.cloudProvider }}
run: |
echo "::group::Download boot logs"
CONSTELL_UID=$(yq '.uid' constellation-id.json)
CONSTELL_UID=$(yq '.infrastructure.uid' constellation-state.yaml)
case $CSP in
azure)
AZURE_RESOURCE_GROUP=$(yq eval ".provider.azure.resourceGroup" constellation-conf.yaml)

@ -39,14 +39,14 @@ runs:
- name: Constellation verify
shell: bash
run: constellation verify --cluster-id $(jq -r ".clusterID" constellation-id.json) --force
run: constellation verify --cluster-id $(jq -r ".clusterValues.clusterID" constellation-state.yaml) --force
- name: Verify all nodes
shell: bash
env:
KUBECONFIG: ${{ inputs.kubeconfig }}
run: |
clusterID=$(jq -r ".clusterID" constellation-id.json)
clusterID=$(jq -r ".clusterValues.clusterID" constellation-state.yaml)
nodes=$(kubectl get nodes -o json | jq -r ".items[].metadata.name")
for node in $nodes ; do

@ -15,6 +15,14 @@ def go_dependencies():
sum = "h1:tdpHgTbmbvEIARu+bixzmleMi14+3imnpoFXz+Qzjp4=",
version = "v1.31.0-20230802163732-1c33ebd9ecfa.1",
)
go_repository(
name = "cat_dario_mergo",
build_file_generation = "on",
build_file_proto_mode = "disable_global",
importpath = "dario.cat/mergo",
sum = "h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=",
version = "v1.0.0",
)
go_repository(
name = "cc_mvdan_editorconfig",

@ -48,7 +48,7 @@ type stubTerraformClient struct {
func (c *stubTerraformClient) ApplyCluster(_ context.Context, _ cloudprovider.Provider, _ terraform.LogLevel) (state.Infrastructure, error) {
return state.Infrastructure{
ClusterEndpoint: c.ip,
InitSecret: c.initSecret,
InitSecret: []byte(c.initSecret),
UID: c.uid,
Azure: &state.Azure{
AttestationURL: c.attestationURL,

@ -151,6 +151,7 @@ go_test(
"//internal/cloud/gcpshared",
"//internal/config",
"//internal/constants",
"//internal/crypto",
"//internal/crypto/testvector",
"//internal/file",
"//internal/grpc/atlscredentials",

@ -12,14 +12,12 @@ import (
"io/fs"
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/cli/internal/terraform"
"github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file"
@ -172,35 +170,15 @@ func (c *createCmd) create(cmd *cobra.Command, creator cloudCreator, fileHandler
}
c.log.Debugf("Successfully created the cloud resources for the cluster")
idFile := convertToIDFile(infraState, provider)
if err := fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptNone); err != nil {
return err
}
state := state.NewState(infraState)
if err := fileHandler.WriteYAML(constants.StateFilename, state, file.OptNone); err != nil {
return err
state := state.New().SetInfrastructure(infraState)
if err := state.WriteToFile(fileHandler, constants.StateFilename); err != nil {
return fmt.Errorf("writing state file: %w", err)
}
cmd.Println("Your Constellation cluster was created successfully.")
return nil
}
func convertToIDFile(infra state.Infrastructure, provider cloudprovider.Provider) clusterid.File {
var file clusterid.File
file.CloudProvider = provider
file.IP = infra.ClusterEndpoint
file.APIServerCertSANs = infra.APIServerCertSANs
file.InitSecret = []byte(infra.InitSecret) // Convert string to []byte
file.UID = infra.UID
if infra.Azure != nil {
file.AttestationURL = infra.Azure.AttestationURL
}
return file
}
// parseCreateFlags parses the flags of the create command.
func (c *createCmd) parseCreateFlags(cmd *cobra.Command) (createFlags, error) {
yes, err := cmd.Flags().GetBool("yes")
@ -256,9 +234,9 @@ func (c *createCmd) checkDirClean(fileHandler file.Handler) error {
if _, err := fileHandler.Stat(constants.MasterSecretFilename); !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("file '%s' already exists in working directory. Constellation won't overwrite previous master secrets. Move it somewhere or delete it before creating a new cluster", c.pf.PrefixPrintablePath(constants.MasterSecretFilename))
}
c.log.Debugf("Checking cluster IDs file")
if _, err := fileHandler.Stat(constants.ClusterIDsFilename); !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("file '%s' already exists in working directory. Constellation won't overwrite previous cluster IDs. Move it somewhere or delete it before creating a new cluster", c.pf.PrefixPrintablePath(constants.ClusterIDsFilename))
c.log.Debugf("Checking state file")
if _, err := fileHandler.Stat(constants.StateFilename); !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("file '%s' already exists in working directory. Constellation won't overwrite previous cluster state. Move it somewhere or delete it before creating a new cluster", c.pf.PrefixPrintablePath(constants.StateFilename))
}
return nil

@ -11,7 +11,6 @@ import (
"errors"
"testing"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config"
@ -154,22 +153,16 @@ func TestCreate(t *testing.T) {
assert.False(tc.creator.createCalled)
} else {
assert.True(tc.creator.createCalled)
var gotIDFile clusterid.File
require.NoError(fileHandler.ReadJSON(constants.ClusterIDsFilename, &gotIDFile))
assert.Equal(gotIDFile, clusterid.File{
IP: infraState.ClusterEndpoint,
CloudProvider: tc.provider,
})
var gotState state.State
expectedState := state.Infrastructure{
ClusterEndpoint: "192.0.2.1",
APIServerCertSANs: []string{},
InitSecret: []byte{},
}
require.NoError(fileHandler.ReadYAML(constants.StateFilename, &gotState))
assert.Equal("v1", gotState.Version)
assert.Equal(expectedState, gotState.Infrastructure)
}
}
})

@ -67,10 +67,10 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr
if !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", c.pf.PrefixPrintablePath(constants.AdminConfFilename))
}
c.log.Debugf("Checking if %q exists", c.pf.PrefixPrintablePath(constants.ClusterIDsFilename))
_, err = fsHandler.Stat(constants.ClusterIDsFilename)
c.log.Debugf("Checking if %q exists", c.pf.PrefixPrintablePath(constants.StateFilename))
_, err = fsHandler.Stat(constants.StateFilename)
if !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", c.pf.PrefixPrintablePath(constants.ClusterIDsFilename))
return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", c.pf.PrefixPrintablePath(constants.StateFilename))
}
gcpFileExists := false

@ -36,9 +36,9 @@ func TestIAMDestroy(t *testing.T) {
require.NoError(fh.Write(constants.AdminConfFilename, []byte("")))
return fh
}
newFsWithClusterIDFile := func() file.Handler {
newFsWithStateFile := func() file.Handler {
fh := file.NewHandler(afero.NewMemMapFs())
require.NoError(fh.Write(constants.ClusterIDsFilename, []byte("")))
require.NoError(fh.Write(constants.StateFilename, []byte("")))
return fh
}
@ -56,8 +56,8 @@ func TestIAMDestroy(t *testing.T) {
yesFlag: "false",
wantErr: true,
},
"cluster running cluster ids": {
fh: newFsWithClusterIDFile(),
"cluster running cluster state": {
fh: newFsWithStateFile(),
iamDestroyer: &stubIAMDestroyer{},
yesFlag: "false",
wantErr: true,

@ -36,13 +36,10 @@ import (
"github.com/edgelesssys/constellation/v2/bootstrapper/initproto"
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
"github.com/edgelesssys/constellation/v2/cli/internal/helm"
"github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/cli/internal/terraform"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/crypto"
@ -77,20 +74,15 @@ type initCmd struct {
merger configMerger
spinner spinnerInterf
fileHandler file.Handler
clusterShower infrastructureShower
pf pathprefix.PathPrefixer
}
func newInitCmd(
clusterShower infrastructureShower, fileHandler file.Handler,
spinner spinnerInterf, merger configMerger, log debugLog,
) *initCmd {
func newInitCmd(fileHandler file.Handler, spinner spinnerInterf, merger configMerger, log debugLog) *initCmd {
return &initCmd{
log: log,
merger: merger,
spinner: spinner,
fileHandler: fileHandler,
clusterShower: clusterShower,
}
}
@ -116,12 +108,7 @@ func runInitialize(cmd *cobra.Command, _ []string) error {
defer cancel()
cmd.SetContext(ctx)
tfClient, err := terraform.New(ctx, constants.TerraformWorkingDir)
if err != nil {
return fmt.Errorf("creating Terraform client: %w", err)
}
i := newInitCmd(tfClient, fileHandler, spinner, &kubeconfigMerger{log: log}, log)
i := newInitCmd(fileHandler, spinner, &kubeconfigMerger{log: log}, log)
fetcher := attestationconfigapi.NewFetcher()
newAttestationApplier := func(w io.Writer, kubeConfig string, log debugLog) (attestationConfigApplier, error) {
return kubecmd.New(w, kubeConfig, fileHandler, log)
@ -168,10 +155,9 @@ func (i *initCmd) initialize(
cmd.PrintErrln("WARNING: Attestation temporarily relies on AWS nitroTPM. See https://docs.edgeless.systems/constellation/workflows/config#choosing-a-vm-type for more information.")
}
i.log.Debugf("Checking cluster ID file")
var idFile clusterid.File
if err := i.fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil {
return fmt.Errorf("reading cluster ID file: %w", err)
stateFile, err := state.ReadFromFile(i.fileHandler, constants.StateFilename)
if err != nil {
return fmt.Errorf("reading state file: %w", err)
}
i.log.Debugf("Validated k8s version as %s", k8sVersion)
@ -187,7 +173,10 @@ func (i *initCmd) initialize(
}
i.log.Debugf("Checked license")
conf.UpdateMAAURL(idFile.AttestationURL)
if stateFile.Infrastructure.Azure != nil {
conf.UpdateMAAURL(stateFile.Infrastructure.Azure.AttestationURL)
}
i.log.Debugf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())
validator, err := cloudcmd.NewValidator(cmd, conf.GetAttestationConfig(), i.log)
if err != nil {
@ -205,15 +194,14 @@ func (i *initCmd) initialize(
if err != nil {
return fmt.Errorf("generating master secret: %w", err)
}
i.log.Debugf("Generated measurement salt")
i.log.Debugf("Generating measurement salt")
measurementSalt, err := crypto.GenerateRandomBytes(crypto.RNGLengthDefault)
if err != nil {
return fmt.Errorf("generating measurement salt: %w", err)
}
idFile.MeasurementSalt = measurementSalt
clusterName := clusterid.GetClusterName(conf, idFile)
i.log.Debugf("Setting cluster name to %s", clusterName)
i.log.Debugf("Setting cluster name to %s", stateFile.Infrastructure.Name)
cmd.PrintErrln("Note: If you just created the cluster, it can take a few minutes to connect.")
i.spinner.Start("Connecting ", false)
@ -224,12 +212,12 @@ func (i *initCmd) initialize(
KubernetesVersion: versions.VersionConfigs[k8sVersion].ClusterVersion,
KubernetesComponents: versions.VersionConfigs[k8sVersion].KubernetesComponents.ToInitProto(),
ConformanceMode: flags.conformance,
InitSecret: idFile.InitSecret,
ClusterName: clusterName,
ApiserverCertSans: idFile.APIServerCertSANs,
InitSecret: stateFile.Infrastructure.InitSecret,
ClusterName: stateFile.Infrastructure.Name,
ApiserverCertSans: stateFile.Infrastructure.APIServerCertSANs,
}
i.log.Debugf("Sending initialization request")
resp, err := i.initCall(cmd.Context(), newDialer(validator), idFile.IP, req)
resp, err := i.initCall(cmd.Context(), newDialer(validator), stateFile.Infrastructure.ClusterEndpoint, req)
i.spinner.Stop()
if err != nil {
@ -247,11 +235,8 @@ func (i *initCmd) initialize(
}
i.log.Debugf("Initialization request succeeded")
i.log.Debugf("Writing Constellation ID file")
idFile.CloudProvider = provider
bufferedOutput := &bytes.Buffer{}
if err := i.writeOutput(idFile, resp, flags.mergeConfigs, bufferedOutput); err != nil {
if err := i.writeOutput(stateFile, resp, flags.mergeConfigs, bufferedOutput, measurementSalt); err != nil {
return err
}
@ -263,11 +248,6 @@ func (i *initCmd) initialize(
return fmt.Errorf("applying attestation config: %w", err)
}
infraState, err := i.clusterShower.ShowInfrastructure(cmd.Context(), conf.GetProvider())
if err != nil {
return fmt.Errorf("getting infrastructure state: %w", err)
}
i.spinner.Start("Installing Kubernetes components ", false)
options := helm.Options{
Force: flags.force,
@ -279,8 +259,7 @@ func (i *initCmd) initialize(
if err != nil {
return fmt.Errorf("creating Helm client: %w", err)
}
executor, includesUpgrades, err := helmApplier.PrepareApply(conf, idFile, options, infraState,
serviceAccURI, masterSecret)
executor, includesUpgrades, err := helmApplier.PrepareApply(conf, stateFile, options, serviceAccURI, masterSecret)
if err != nil {
return fmt.Errorf("getting Helm chart executor: %w", err)
}
@ -457,23 +436,32 @@ func (d *initDoer) handleGRPCStateChanges(ctx context.Context, wg *sync.WaitGrou
})
}
// writeOutput writes the output of a cluster initialization to the
// state- / id- / kubeconfig-file and saves it to disk.
func (i *initCmd) writeOutput(
idFile clusterid.File, initResp *initproto.InitSuccessResponse, mergeConfig bool, wr io.Writer,
stateFile *state.State,
initResp *initproto.InitSuccessResponse,
mergeConfig bool, wr io.Writer,
measurementSalt []byte,
) error {
fmt.Fprint(wr, "Your Constellation cluster was successfully initialized.\n\n")
ownerID := hex.EncodeToString(initResp.GetOwnerId())
// i.log.Debugf("Owner id is %s", ownerID)
clusterID := hex.EncodeToString(initResp.GetClusterId())
stateFile.SetClusterValues(state.ClusterValues{
MeasurementSalt: measurementSalt,
OwnerID: ownerID,
ClusterID: clusterID,
})
tw := tabwriter.NewWriter(wr, 0, 0, 2, ' ', 0)
// writeRow(tw, "Constellation cluster's owner identifier", ownerID)
writeRow(tw, "Constellation cluster identifier", clusterID)
writeRow(tw, "Kubernetes configuration", i.pf.PrefixPrintablePath(constants.AdminConfFilename))
tw.Flush()
fmt.Fprintln(wr)
i.log.Debugf("Rewriting cluster server address in kubeconfig to %s", idFile.IP)
i.log.Debugf("Rewriting cluster server address in kubeconfig to %s", stateFile.Infrastructure.ClusterEndpoint)
kubeconfig, err := clientcmd.Load(initResp.GetKubeconfig())
if err != nil {
return fmt.Errorf("loading kubeconfig: %w", err)
@ -486,7 +474,7 @@ func (i *initCmd) writeOutput(
if err != nil {
return fmt.Errorf("parsing kubeconfig server URL: %w", err)
}
kubeEndpoint.Host = net.JoinHostPort(idFile.IP, kubeEndpoint.Port())
kubeEndpoint.Host = net.JoinHostPort(stateFile.Infrastructure.ClusterEndpoint, kubeEndpoint.Port())
cluster.Server = kubeEndpoint.String()
}
kubeconfigBytes, err := clientcmd.Write(*kubeconfig)
@ -508,13 +496,11 @@ func (i *initCmd) writeOutput(
}
}
idFile.OwnerID = ownerID
idFile.ClusterID = clusterID
if err := i.fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptOverwrite); err != nil {
return fmt.Errorf("writing Constellation ID file: %w", err)
if err := stateFile.WriteToFile(i.fileHandler, constants.StateFilename); err != nil {
return fmt.Errorf("writing Constellation state file: %w", err)
}
i.log.Debugf("Constellation ID file written to %s", i.pf.PrefixPrintablePath(constants.ClusterIDsFilename))
i.log.Debugf("Constellation state file written to %s", i.pf.PrefixPrintablePath(constants.StateFilename))
if !mergeConfig {
fmt.Fprintln(wr, "You can now connect to your cluster by executing:")
@ -694,11 +680,7 @@ type attestationConfigApplier interface {
}
type helmApplier interface {
PrepareApply(conf *config.Config, idFile clusterid.File,
flags helm.Options, infra state.Infrastructure, serviceAccURI string, masterSecret uri.MasterSecret) (
PrepareApply(conf *config.Config, stateFile *state.State,
flags helm.Options, serviceAccURI string, masterSecret uri.MasterSecret) (
helm.Applier, bool, error)
}
type infrastructureShower interface {
ShowInfrastructure(ctx context.Context, provider cloudprovider.Provider) (state.Infrastructure, error)
}

@ -21,7 +21,6 @@ import (
"time"
"github.com/edgelesssys/constellation/v2/bootstrapper/initproto"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
"github.com/edgelesssys/constellation/v2/cli/internal/helm"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
@ -92,7 +91,7 @@ func TestInitialize(t *testing.T) {
testCases := map[string]struct {
provider cloudprovider.Provider
idFile *clusterid.File
stateFile *state.State
configMutator func(*config.Config)
serviceAccKey *gcpshared.ServiceAccountKey
initServerAPI *stubInitServer
@ -102,24 +101,24 @@ func TestInitialize(t *testing.T) {
}{
"initialize some gcp instances": {
provider: cloudprovider.GCP,
idFile: &clusterid.File{IP: "192.0.2.1"},
stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
configMutator: func(c *config.Config) { c.Provider.GCP.ServiceAccountKeyPath = serviceAccPath },
serviceAccKey: gcpServiceAccKey,
initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
},
"initialize some azure instances": {
provider: cloudprovider.Azure,
idFile: &clusterid.File{IP: "192.0.2.1"},
stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
},
"initialize some qemu instances": {
provider: cloudprovider.QEMU,
idFile: &clusterid.File{IP: "192.0.2.1"},
stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
},
"non retriable error": {
provider: cloudprovider.QEMU,
idFile: &clusterid.File{IP: "192.0.2.1"},
stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
initServerAPI: &stubInitServer{initErr: &nonRetriableError{err: assert.AnError}},
retriable: false,
masterSecretShouldExist: true,
@ -127,7 +126,7 @@ func TestInitialize(t *testing.T) {
},
"non retriable error with failed log collection": {
provider: cloudprovider.QEMU,
idFile: &clusterid.File{IP: "192.0.2.1"},
stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
initServerAPI: &stubInitServer{
res: []*initproto.InitResponse{
{
@ -150,28 +149,35 @@ func TestInitialize(t *testing.T) {
masterSecretShouldExist: true,
wantErr: true,
},
"empty id file": {
"state file with only version": {
provider: cloudprovider.GCP,
idFile: &clusterid.File{},
stateFile: &state.State{Version: state.Version1},
initServerAPI: &stubInitServer{},
retriable: true,
wantErr: true,
},
"no id file": {
"empty state file": {
provider: cloudprovider.GCP,
stateFile: &state.State{},
initServerAPI: &stubInitServer{},
retriable: true,
wantErr: true,
},
"no state file": {
provider: cloudprovider.GCP,
retriable: true,
wantErr: true,
},
"init call fails": {
provider: cloudprovider.GCP,
idFile: &clusterid.File{IP: "192.0.2.1"},
stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
initServerAPI: &stubInitServer{initErr: assert.AnError},
retriable: true,
wantErr: true,
},
"k8s version without v works": {
provider: cloudprovider.Azure,
idFile: &clusterid.File{IP: "192.0.2.1"},
stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
configMutator: func(c *config.Config) {
res, err := versions.NewValidK8sVersion(strings.TrimPrefix(string(versions.Default), "v"), true)
@ -181,7 +187,7 @@ func TestInitialize(t *testing.T) {
},
"outdated k8s patch version doesn't work": {
provider: cloudprovider.Azure,
idFile: &clusterid.File{IP: "192.0.2.1"},
stateFile: &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
configMutator: func(c *config.Config) {
v, err := semver.New(versions.SupportedK8sVersions()[0])
@ -229,9 +235,10 @@ func TestInitialize(t *testing.T) {
tc.configMutator(config)
}
require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, config, file.OptNone))
if tc.idFile != nil {
tc.idFile.CloudProvider = tc.provider
require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, tc.idFile, file.OptNone))
stateFile := state.New()
require.NoError(stateFile.WriteToFile(fileHandler, constants.StateFilename))
if tc.stateFile != nil {
require.NoError(tc.stateFile.WriteToFile(fileHandler, constants.StateFilename))
}
if tc.serviceAccKey != nil {
require.NoError(fileHandler.WriteJSON(serviceAccPath, tc.serviceAccKey, file.OptNone))
@ -241,11 +248,16 @@ func TestInitialize(t *testing.T) {
ctx, cancel := context.WithTimeout(ctx, 4*time.Second)
defer cancel()
cmd.SetContext(ctx)
i := newInitCmd(&stubShowInfrastructure{}, fileHandler, &nopSpinner{}, nil, logger.NewTest(t))
err := i.initialize(cmd, newDialer, &stubLicenseClient{}, stubAttestationFetcher{},
i := newInitCmd(fileHandler, &nopSpinner{}, nil, logger.NewTest(t))
err := i.initialize(
cmd,
newDialer,
&stubLicenseClient{},
stubAttestationFetcher{},
func(io.Writer, string, debugLog) (attestationConfigApplier, error) {
return &stubAttestationApplier{}, nil
}, func(_ string, _ debugLog) (helmApplier, error) {
},
func(_ string, _ debugLog) (helmApplier, error) {
return &stubApplier{}, nil
})
@ -277,7 +289,7 @@ type stubApplier struct {
err error
}
func (s stubApplier) PrepareApply(_ *config.Config, _ clusterid.File, _ helm.Options, _ state.Infrastructure, _ string, _ uri.MasterSecret) (helm.Applier, bool, error) {
func (s stubApplier) PrepareApply(_ *config.Config, _ *state.State, _ helm.Options, _ string, _ uri.MasterSecret) (helm.Applier, bool, error) {
return stubRunner{}, false, s.err
}
@ -386,26 +398,33 @@ func TestWriteOutput(t *testing.T) {
ownerID := hex.EncodeToString(resp.GetInitSuccess().GetOwnerId())
clusterID := hex.EncodeToString(resp.GetInitSuccess().GetClusterId())
measurementSalt := []byte{0x41}
expectedIDFile := clusterid.File{
expectedStateFile := &state.State{
Version: state.Version1,
ClusterValues: state.ClusterValues{
ClusterID: clusterID,
OwnerID: ownerID,
IP: clusterEndpoint,
UID: "test-uid",
MeasurementSalt: []byte{0x41},
},
Infrastructure: state.Infrastructure{
APIServerCertSANs: []string{},
InitSecret: []byte{},
ClusterEndpoint: clusterEndpoint,
},
}
var out bytes.Buffer
testFs := afero.NewMemMapFs()
fileHandler := file.NewHandler(testFs)
idFile := clusterid.File{
UID: "test-uid",
IP: clusterEndpoint,
}
i := newInitCmd(nil, fileHandler, &nopSpinner{}, &stubMerger{}, logger.NewTest(t))
err = i.writeOutput(idFile, resp.GetInitSuccess(), false, &out)
stateFile := state.New().SetInfrastructure(state.Infrastructure{
ClusterEndpoint: clusterEndpoint,
})
i := newInitCmd(fileHandler, &nopSpinner{}, &stubMerger{}, logger.NewTest(t))
err = i.writeOutput(stateFile, resp.GetInitSuccess(), false, &out, measurementSalt)
require.NoError(err)
// assert.Contains(out.String(), ownerID)
assert.Contains(out.String(), clusterID)
assert.Contains(out.String(), constants.AdminConfFilename)
@ -415,20 +434,17 @@ func TestWriteOutput(t *testing.T) {
assert.Contains(string(adminConf), clusterEndpoint)
assert.Equal(string(expectedKubeconfigBytes), string(adminConf))
idsFile, err := afs.ReadFile(constants.ClusterIDsFilename)
fh := file.NewHandler(afs)
readStateFile, err := state.ReadFromFile(fh, constants.StateFilename)
assert.NoError(err)
var testIDFile clusterid.File
err = json.Unmarshal(idsFile, &testIDFile)
assert.NoError(err)
assert.Equal(expectedIDFile, testIDFile)
assert.Equal(expectedStateFile, readStateFile)
out.Reset()
require.NoError(afs.Remove(constants.AdminConfFilename))
// test custom workspace
i.pf = pathprefix.New("/some/path")
err = i.writeOutput(idFile, resp.GetInitSuccess(), true, &out)
err = i.writeOutput(stateFile, resp.GetInitSuccess(), true, &out, measurementSalt)
require.NoError(err)
// assert.Contains(out.String(), ownerID)
assert.Contains(out.String(), clusterID)
assert.Contains(out.String(), i.pf.PrefixPrintablePath(constants.AdminConfFilename))
out.Reset()
@ -437,9 +453,8 @@ func TestWriteOutput(t *testing.T) {
i.pf = pathprefix.PathPrefixer{}
// test config merging
err = i.writeOutput(idFile, resp.GetInitSuccess(), true, &out)
err = i.writeOutput(stateFile, resp.GetInitSuccess(), true, &out, measurementSalt)
require.NoError(err)
// assert.Contains(out.String(), ownerID)
assert.Contains(out.String(), clusterID)
assert.Contains(out.String(), constants.AdminConfFilename)
assert.Contains(out.String(), "Constellation kubeconfig merged with default config")
@ -449,9 +464,8 @@ func TestWriteOutput(t *testing.T) {
// test config merging with env vars set
i.merger = &stubMerger{envVar: "/some/path/to/kubeconfig"}
err = i.writeOutput(idFile, resp.GetInitSuccess(), true, &out)
err = i.writeOutput(stateFile, resp.GetInitSuccess(), true, &out, measurementSalt)
require.NoError(err)
// assert.Contains(out.String(), ownerID)
assert.Contains(out.String(), clusterID)
assert.Contains(out.String(), constants.AdminConfFilename)
assert.Contains(out.String(), "Constellation kubeconfig merged with default config")
@ -496,7 +510,7 @@ func TestGenerateMasterSecret(t *testing.T) {
require.NoError(tc.createFileFunc(fileHandler))
var out bytes.Buffer
i := newInitCmd(nil, fileHandler, nil, nil, logger.NewTest(t))
i := newInitCmd(fileHandler, nil, nil, logger.NewTest(t))
secret, err := i.generateMasterSecret(&out)
if tc.wantErr {
@ -530,7 +544,8 @@ func TestAttestation(t *testing.T) {
},
},
}}
existingIDFile := &clusterid.File{IP: "192.0.2.4", CloudProvider: cloudprovider.QEMU}
existingStateFile := &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.4"}}
netDialer := testdialer.NewBufconnDialer()
@ -561,7 +576,7 @@ func TestAttestation(t *testing.T) {
fs := afero.NewMemMapFs()
fileHandler := file.NewHandler(fs)
require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, existingIDFile, file.OptNone))
require.NoError(existingStateFile.WriteToFile(fileHandler, constants.StateFilename))
cfg := config.Default()
cfg.Image = "v0.0.0" // is the default version of the CLI (before build injects the real version)
@ -588,7 +603,7 @@ func TestAttestation(t *testing.T) {
defer cancel()
cmd.SetContext(ctx)
i := newInitCmd(nil, fileHandler, &nopSpinner{}, nil, logger.NewTest(t))
i := newInitCmd(fileHandler, &nopSpinner{}, nil, logger.NewTest(t))
err := i.initialize(cmd, newDialer, &stubLicenseClient{}, stubAttestationFetcher{},
func(io.Writer, string, debugLog) (attestationConfigApplier, error) {
return &stubAttestationApplier{}, nil
@ -758,23 +773,10 @@ func (c stubInitClient) Recv() (*initproto.InitResponse, error) {
return res, err
}
type stubShowInfrastructure struct{}
func (s *stubShowInfrastructure) ShowInfrastructure(_ context.Context, csp cloudprovider.Provider) (state.Infrastructure, error) {
res := state.Infrastructure{}
switch csp {
case cloudprovider.Azure:
res.Azure = &state.Azure{}
case cloudprovider.GCP:
res.GCP = &state.GCP{}
}
return res, nil
}
type stubAttestationApplier struct {
applyErr error
}
func (a *stubAttestationApplier) ApplyJoinConfig(_ context.Context, _ config.AttestationCfg, _ []byte) error {
func (a *stubAttestationApplier) ApplyJoinConfig(context.Context, config.AttestationCfg, []byte) error {
return a.applyErr
}

@ -11,8 +11,7 @@ import (
"fmt"
"os"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/spf13/afero"
@ -44,14 +43,12 @@ func runDown(cmd *cobra.Command, args []string) error {
}
func checkForMiniCluster(fileHandler file.Handler) error {
var idFile clusterid.File
if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil {
return err
stateFile, err := state.ReadFromFile(fileHandler, constants.StateFilename)
if err != nil {
return fmt.Errorf("reading state file: %w", err)
}
if idFile.CloudProvider != cloudprovider.QEMU {
return errors.New("cluster is not a QEMU based Constellation")
}
if idFile.UID != constants.MiniConstellationUID {
if stateFile.Infrastructure.UID != constants.MiniConstellationUID {
return errors.New("cluster is not a MiniConstellation cluster")
}

@ -19,6 +19,7 @@ import (
"github.com/edgelesssys/constellation/v2/cli/internal/helm"
"github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
"github.com/edgelesssys/constellation/v2/cli/internal/libvirt"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/cli/internal/terraform"
"github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
"github.com/edgelesssys/constellation/v2/internal/atls"
@ -172,14 +173,18 @@ func (m *miniUpCmd) createMiniCluster(ctx context.Context, fileHandler file.Hand
TFWorkspace: constants.TerraformWorkingDir,
TFLogLevel: flags.tfLogLevel,
}
idFile, err := creator.Create(ctx, opts)
infraState, err := creator.Create(ctx, opts)
if err != nil {
return err
}
idFile.UID = constants.MiniConstellationUID // use UID "mini" to identify MiniConstellation clusters.
m.log.Debugf("Cluster id file contains %v", idFile)
return fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptNone)
infraState.UID = constants.MiniConstellationUID // use UID "mini" to identify MiniConstellation clusters.
stateFile := state.New().
SetInfrastructure(infraState)
m.log.Debugf("Cluster state file contains %v", stateFile)
return stateFile.WriteToFile(fileHandler, constants.StateFilename)
}
// initializeMiniCluster initializes a QEMU cluster.
@ -208,18 +213,13 @@ func (m *miniUpCmd) initializeMiniCluster(cmd *cobra.Command, fileHandler file.H
m.log.Debugf("Created new logger")
defer log.Sync()
tfClient, err := terraform.New(cmd.Context(), constants.TerraformWorkingDir)
if err != nil {
return fmt.Errorf("creating Terraform client: %w", err)
}
newAttestationApplier := func(w io.Writer, kubeConfig string, log debugLog) (attestationConfigApplier, error) {
return kubecmd.New(w, kubeConfig, fileHandler, log)
}
newHelmClient := func(kubeConfigPath string, log debugLog) (helmApplier, error) {
return helm.NewClient(kubeConfigPath, log)
} // need to defer helm client instantiation until kubeconfig is available
i := newInitCmd(tfClient, fileHandler, spinner, &kubeconfigMerger{log: log}, log)
i := newInitCmd(fileHandler, spinner, &kubeconfigMerger{log: log}, log)
if err := i.initialize(cmd, newDialer, license.NewClient(), m.configFetcher,
newAttestationApplier, newHelmClient); err != nil {
return err

@ -16,15 +16,14 @@ import (
"time"
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/disk-mapper/recoverproto"
"github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
"github.com/edgelesssys/constellation/v2/internal/atls"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/crypto"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
grpcRetry "github.com/edgelesssys/constellation/v2/internal/grpc/retry"
@ -225,39 +224,40 @@ func (r *recoverCmd) parseRecoverFlags(cmd *cobra.Command, fileHandler file.Hand
r.log.Debugf("Workspace set to %q", workDir)
r.pf = pathprefix.New(workDir)
var idFile clusterid.File
if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil && !errors.Is(err, afero.ErrFileNotFound) {
return recoverFlags{}, err
}
endpoint, err := cmd.Flags().GetString("endpoint")
r.log.Debugf("Endpoint flag is %s", endpoint)
if err != nil {
return recoverFlags{}, fmt.Errorf("parsing endpoint argument: %w", err)
}
if endpoint == "" {
endpoint = idFile.IP
}
endpoint, err = addPortIfMissing(endpoint, constants.RecoveryPort)
if err != nil {
return recoverFlags{}, fmt.Errorf("validating endpoint argument: %w", err)
}
r.log.Debugf("Endpoint value after parsing is %s", endpoint)
force, err := cmd.Flags().GetBool("force")
if err != nil {
return recoverFlags{}, fmt.Errorf("parsing force argument: %w", err)
}
var attestationURL string
stateFile := state.New()
if endpoint == "" {
stateFile, err = state.ReadFromFile(fileHandler, constants.StateFilename)
if err != nil {
return recoverFlags{}, fmt.Errorf("reading state file: %w", err)
}
endpoint = stateFile.Infrastructure.ClusterEndpoint
}
endpoint, err = addPortIfMissing(endpoint, constants.RecoveryPort)
if err != nil {
return recoverFlags{}, fmt.Errorf("validating endpoint argument: %w", err)
}
r.log.Debugf("Endpoint value after parsing is %s", endpoint)
if stateFile.Infrastructure.Azure != nil {
attestationURL = stateFile.Infrastructure.Azure.AttestationURL
}
return recoverFlags{
endpoint: endpoint,
maaURL: idFile.AttestationURL,
maaURL: attestationURL,
force: force,
}, nil
}
func getStateDiskKeyFunc(masterKey, salt []byte) func(uuid string) ([]byte, error) {
return func(uuid string) ([]byte, error) {
return crypto.DeriveKey(masterKey, salt, []byte(crypto.DEKPrefix+uuid), crypto.StateDiskKeyLength)
}
}

@ -15,12 +15,13 @@ import (
"testing"
"time"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/disk-mapper/recoverproto"
"github.com/edgelesssys/constellation/v2/internal/atls"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/crypto"
"github.com/edgelesssys/constellation/v2/internal/crypto/testvector"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/grpc/atlscredentials"
@ -186,14 +187,14 @@ func TestParseRecoverFlags(t *testing.T) {
testCases := map[string]struct {
args []string
wantFlags recoverFlags
writeIDFile bool
writeStateFile bool
wantErr bool
}{
"no flags": {
wantFlags: recoverFlags{
endpoint: "192.0.2.42:9999",
},
writeIDFile: true,
writeStateFile: true,
},
"no flags, no ID file": {
wantFlags: recoverFlags{
@ -224,8 +225,12 @@ func TestParseRecoverFlags(t *testing.T) {
require.NoError(cmd.ParseFlags(tc.args))
fileHandler := file.NewHandler(afero.NewMemMapFs())
if tc.writeIDFile {
require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, &clusterid.File{IP: "192.0.2.42"}))
if tc.writeStateFile {
require.NoError(
state.New().
SetInfrastructure(state.Infrastructure{ClusterEndpoint: "192.0.2.42"}).
WriteToFile(fileHandler, constants.StateFilename),
)
}
r := &recoverCmd{log: logger.NewTest(t)}
flags, err := r.parseRecoverFlags(cmd, fileHandler)
@ -309,6 +314,12 @@ func TestDeriveStateDiskKey(t *testing.T) {
}
}
func getStateDiskKeyFunc(masterKey, salt []byte) func(uuid string) ([]byte, error) {
return func(uuid string) ([]byte, error) {
return crypto.DeriveKey(masterKey, salt, []byte(crypto.DEKPrefix+uuid), crypto.StateDiskKeyLength)
}
}
type stubRecoveryServer struct {
recoverError error
recoverproto.UnimplementedAPIServer

@ -84,6 +84,7 @@ func terminate(cmd *cobra.Command, terminator cloudTerminator, fileHandler file.
removeErr = errors.Join(err, fmt.Errorf("failed to remove file: '%s', please remove it manually", pf.PrefixPrintablePath(constants.AdminConfFilename)))
}
// TODO(msanft): Once v2.12.0 is released, remove the ID-file-removal here.
if err := fileHandler.Remove(constants.ClusterIDsFilename); err != nil && !errors.Is(err, fs.ErrNotExist) {
removeErr = errors.Join(err, fmt.Errorf("failed to remove file: '%s', please remove it manually", pf.PrefixPrintablePath(constants.ClusterIDsFilename)))
}

@ -11,8 +11,7 @@ import (
"errors"
"testing"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/spf13/afero"
@ -47,65 +46,64 @@ func TestTerminateCmdArgumentValidation(t *testing.T) {
}
func TestTerminate(t *testing.T) {
setupFs := func(require *require.Assertions, idFile clusterid.File) afero.Fs {
setupFs := func(require *require.Assertions, stateFile *state.State) afero.Fs {
fs := afero.NewMemMapFs()
fileHandler := file.NewHandler(fs)
require.NoError(fileHandler.Write(constants.AdminConfFilename, []byte{1, 2}, file.OptNone))
require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptNone))
require.NoError(fileHandler.Write(constants.StateFilename, []byte{3, 4}, file.OptNone))
require.NoError(stateFile.WriteToFile(fileHandler, constants.StateFilename))
return fs
}
someErr := errors.New("failed")
testCases := map[string]struct {
idFile clusterid.File
stateFile *state.State
yesFlag bool
stdin string
setupFs func(*require.Assertions, clusterid.File) afero.Fs
setupFs func(*require.Assertions, *state.State) afero.Fs
terminator spyCloudTerminator
wantErr bool
wantAbort bool
}{
"success": {
idFile: clusterid.File{CloudProvider: cloudprovider.GCP},
stateFile: state.New(),
setupFs: setupFs,
terminator: &stubCloudTerminator{},
yesFlag: true,
},
"interactive": {
idFile: clusterid.File{CloudProvider: cloudprovider.GCP},
stateFile: state.New(),
setupFs: setupFs,
terminator: &stubCloudTerminator{},
stdin: "yes\n",
},
"interactive abort": {
idFile: clusterid.File{CloudProvider: cloudprovider.GCP},
stateFile: state.New(),
setupFs: setupFs,
terminator: &stubCloudTerminator{},
stdin: "no\n",
wantAbort: true,
},
"files to remove do not exist": {
idFile: clusterid.File{CloudProvider: cloudprovider.GCP},
setupFs: func(require *require.Assertions, idFile clusterid.File) afero.Fs {
stateFile: state.New(),
setupFs: func(require *require.Assertions, stateFile *state.State) afero.Fs {
fs := afero.NewMemMapFs()
fileHandler := file.NewHandler(fs)
require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile, file.OptNone))
require.NoError(stateFile.WriteToFile(fileHandler, constants.StateFilename))
return fs
},
terminator: &stubCloudTerminator{},
yesFlag: true,
},
"terminate error": {
idFile: clusterid.File{CloudProvider: cloudprovider.GCP},
stateFile: state.New(),
setupFs: setupFs,
terminator: &stubCloudTerminator{terminateErr: someErr},
yesFlag: true,
wantErr: true,
},
"missing id file does not error": {
idFile: clusterid.File{CloudProvider: cloudprovider.GCP},
setupFs: func(require *require.Assertions, idFile clusterid.File) afero.Fs {
stateFile: state.New(),
setupFs: func(require *require.Assertions, stateFile *state.State) afero.Fs {
fs := afero.NewMemMapFs()
fileHandler := file.NewHandler(fs)
require.NoError(fileHandler.Write(constants.AdminConfFilename, []byte{1, 2}, file.OptNone))
@ -115,9 +113,9 @@ func TestTerminate(t *testing.T) {
yesFlag: true,
},
"remove file fails": {
idFile: clusterid.File{CloudProvider: cloudprovider.GCP},
setupFs: func(require *require.Assertions, idFile clusterid.File) afero.Fs {
fs := setupFs(require, idFile)
stateFile: state.New(),
setupFs: func(require *require.Assertions, stateFile *state.State) afero.Fs {
fs := setupFs(require, stateFile)
return afero.NewReadOnlyFs(fs)
},
terminator: &stubCloudTerminator{},
@ -141,7 +139,7 @@ func TestTerminate(t *testing.T) {
cmd.Flags().String("workspace", "", "")
require.NotNil(tc.setupFs)
fileHandler := file.NewHandler(tc.setupFs(require, tc.idFile))
fileHandler := file.NewHandler(tc.setupFs(require, tc.stateFile))
if tc.yesFlag {
require.NoError(cmd.Flags().Set("yes", "true"))
@ -159,8 +157,6 @@ func TestTerminate(t *testing.T) {
assert.True(tc.terminator.Called())
_, err = fileHandler.Stat(constants.AdminConfFilename)
assert.Error(err)
_, err = fileHandler.Stat(constants.ClusterIDsFilename)
assert.Error(err)
_, err = fileHandler.Stat(constants.StateFilename)
assert.Error(err)
}

@ -11,6 +11,7 @@ import (
"errors"
"fmt"
"io"
"io/fs"
"path/filepath"
"strings"
"time"
@ -145,6 +146,10 @@ type upgradeApplyCmd struct {
log debugLog
}
type infrastructureShower interface {
ShowInfrastructure(ctx context.Context, provider cloudprovider.Provider) (state.Infrastructure, error)
}
func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, upgradeDir string, flags upgradeApplyFlags) error {
conf, err := config.New(u.fileHandler, constants.ConfigFilename, u.configFetcher, flags.force)
var configValidationErr *config.ValidationError
@ -172,11 +177,24 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, upgradeDir string, fl
return err
}
stateFile, err := state.ReadFromFile(u.fileHandler, constants.StateFilename)
// TODO(msanft): Remove reading from idFile once v2.12.0 is released and read from state file directly.
// For now, this is only here to ensure upgradability from an id-file to a state file version.
if errors.Is(err, fs.ErrNotExist) {
u.log.Debugf("%s does not exist in current directory, falling back to reading from %s",
constants.StateFilename, constants.ClusterIDsFilename)
var idFile clusterid.File
if err := u.fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil {
return fmt.Errorf("reading cluster ID file: %w", err)
}
conf.UpdateMAAURL(idFile.AttestationURL)
// Convert id-file to state file
stateFile = state.NewFromIDFile(idFile, conf)
if stateFile.Infrastructure.Azure != nil {
conf.UpdateMAAURL(stateFile.Infrastructure.Azure.AttestationURL)
}
} else if err != nil {
return fmt.Errorf("reading state file: %w", err)
}
// Apply migrations necessary for the upgrade
if err := migrateFrom2_10(cmd.Context(), u.kubeUpgrader); err != nil {
@ -186,37 +204,55 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, upgradeDir string, fl
return fmt.Errorf("applying migration for upgrading from v2.11: %w", err)
}
if err := u.confirmAndUpgradeAttestationConfig(cmd, conf.GetAttestationConfig(), idFile.MeasurementSalt, flags); err != nil {
if err := u.confirmAndUpgradeAttestationConfig(cmd, conf.GetAttestationConfig(), stateFile.ClusterValues.MeasurementSalt, flags); err != nil {
return fmt.Errorf("upgrading measurements: %w", err)
}
var infraState state.Infrastructure
// If infrastructure phase is skipped, we expect the new infrastructure
// to be in the Terraform configuration already. Otherwise, perform
// the Terraform migrations.
var postMigrationInfraState state.Infrastructure
if flags.skipPhases.contains(skipInfrastructurePhase) {
infraState, err = u.clusterShower.ShowInfrastructure(cmd.Context(), conf.GetProvider())
// TODO(msanft): Once v2.12.0 is released, this should be removed and the state should be read
// from the state file instead, as it will be the only source of truth for the cluster's infrastructure.
postMigrationInfraState, err = u.clusterShower.ShowInfrastructure(cmd.Context(), conf.GetProvider())
if err != nil {
return fmt.Errorf("getting infra state: %w", err)
return fmt.Errorf("getting Terraform state: %w", err)
}
} else {
infraState, err = u.migrateTerraform(cmd, conf, upgradeDir, flags)
postMigrationInfraState, err = u.migrateTerraform(cmd, conf, upgradeDir, flags)
if err != nil {
return fmt.Errorf("performing Terraform migrations: %w", err)
}
}
// reload idFile after terraform migration
// it might have been updated by the migration
if err := u.fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil {
return fmt.Errorf("reading updated cluster ID file: %w", err)
// Merge the pre-upgrade state with the post-migration infrastructure values
if _, err := stateFile.Merge(
// temporary state with post-migration infrastructure values
state.New().SetInfrastructure(postMigrationInfraState),
); err != nil {
return fmt.Errorf("merging pre-upgrade state with post-migration infrastructure values: %w", err)
}
state := state.NewState(infraState)
// TODO(elchead): AB#3424 move this to updateClusterIDFile and correctly handle existing state when writing state
if err := u.fileHandler.WriteYAML(constants.StateFilename, state, file.OptOverwrite); err != nil {
// Write the post-migration state to disk
if err := stateFile.WriteToFile(u.fileHandler, constants.StateFilename); err != nil {
return fmt.Errorf("writing state file: %w", err)
}
// TODO(msanft): Remove this after v2.12.0 is released, as we do not support
// the id-file starting from v2.13.0.
err = u.fileHandler.RenameFile(constants.ClusterIDsFilename, constants.ClusterIDsFilename+".old")
if !errors.Is(err, fs.ErrNotExist) && err != nil {
return fmt.Errorf("removing cluster ID file: %w", err)
}
// extend the clusterConfig cert SANs with any of the supported endpoints:
// - (legacy) public IP
// - fallback endpoint
// - custom (user-provided) endpoint
sans := append([]string{idFile.IP, conf.CustomEndpoint}, idFile.APIServerCertSANs...)
// TODO(msanft): Remove the comment below once v2.12.0 is released.
// At this point, state file and id-file should have been merged, so we can use the state file.
sans := append([]string{stateFile.Infrastructure.ClusterEndpoint, conf.CustomEndpoint}, stateFile.Infrastructure.APIServerCertSANs...)
if err := u.kubeUpgrader.ExtendClusterConfigCertSANs(cmd.Context(), sans); err != nil {
return fmt.Errorf("extending cert SANs: %w", err)
}
@ -228,7 +264,7 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, upgradeDir string, fl
var upgradeErr *compatibility.InvalidUpgradeError
if !flags.skipPhases.contains(skipHelmPhase) {
err = u.handleServiceUpgrade(cmd, conf, idFile, infraState, upgradeDir, flags)
err = u.handleServiceUpgrade(cmd, conf, stateFile, upgradeDir, flags)
switch {
case errors.As(err, &upgradeErr):
cmd.PrintErrln(err)
@ -269,14 +305,16 @@ func diffAttestationCfg(currentAttestationCfg config.AttestationCfg, newAttestat
}
// migrateTerraform checks if the Constellation version the cluster is being upgraded to requires a migration
// of cloud resources with Terraform. If so, the migration is performed.
func (u *upgradeApplyCmd) migrateTerraform(cmd *cobra.Command, conf *config.Config, upgradeDir string, flags upgradeApplyFlags,
) (res state.Infrastructure, err error) {
// of cloud resources with Terraform. If so, the migration is performed and the post-migration infrastructure state is returned.
// If no migration is required, the current (pre-upgrade) infrastructure state is returned.
func (u *upgradeApplyCmd) migrateTerraform(
cmd *cobra.Command, conf *config.Config, upgradeDir string, flags upgradeApplyFlags,
) (state.Infrastructure, error) {
u.log.Debugf("Planning Terraform migrations")
vars, err := cloudcmd.TerraformUpgradeVars(conf)
if err != nil {
return res, fmt.Errorf("parsing upgrade variables: %w", err)
return state.Infrastructure{}, fmt.Errorf("parsing upgrade variables: %w", err)
}
u.log.Debugf("Using Terraform variables:\n%v", vars)
@ -292,60 +330,46 @@ func (u *upgradeApplyCmd) migrateTerraform(cmd *cobra.Command, conf *config.Conf
hasDiff, err := u.clusterUpgrader.PlanClusterUpgrade(cmd.Context(), cmd.OutOrStdout(), vars, conf.GetProvider())
if err != nil {
return res, fmt.Errorf("planning terraform migrations: %w", err)
return state.Infrastructure{}, fmt.Errorf("planning terraform migrations: %w", err)
}
if !hasDiff {
u.log.Debugf("No Terraform diff detected")
return u.clusterShower.ShowInfrastructure(cmd.Context(), conf.GetProvider())
}
if hasDiff {
// If there are any Terraform migrations to apply, ask for confirmation
fmt.Fprintln(cmd.OutOrStdout(), "The upgrade requires a migration of Constellation cloud resources by applying an updated Terraform template. Please manually review the suggested changes below.")
if !flags.yes {
ok, err := askToConfirm(cmd, "Do you want to apply the Terraform migrations?")
if err != nil {
return res, fmt.Errorf("asking for confirmation: %w", err)
return state.Infrastructure{}, fmt.Errorf("asking for confirmation: %w", err)
}
if !ok {
cmd.Println("Aborting upgrade.")
// User doesn't expect to see any changes in his workspace after aborting an "upgrade apply",
// therefore, roll back to the backed up state.
if err := u.clusterUpgrader.RestoreClusterWorkspace(); err != nil {
return res, fmt.Errorf(
return state.Infrastructure{}, fmt.Errorf(
"restoring Terraform workspace: %w, restore the Terraform workspace manually from %s ",
err,
filepath.Join(upgradeDir, constants.TerraformUpgradeBackupDir),
)
}
return res, fmt.Errorf("cluster upgrade aborted by user")
return state.Infrastructure{}, fmt.Errorf("cluster upgrade aborted by user")
}
}
u.log.Debugf("Applying Terraform migrations")
infraState, err := u.clusterUpgrader.ApplyClusterUpgrade(cmd.Context(), conf.GetProvider())
if err != nil {
return infraState, fmt.Errorf("applying terraform migrations: %w", err)
return state.Infrastructure{}, fmt.Errorf("applying terraform migrations: %w", err)
}
// Apply possible updates to cluster ID file
if err := updateClusterIDFile(infraState, u.fileHandler); err != nil {
return infraState, fmt.Errorf("merging cluster ID files: %w", err)
}
cmd.Printf("Terraform migrations applied successfully and output written to: %s\n"+
cmd.Printf("Infrastructure migrations applied successfully and output written to: %s\n"+
"A backup of the pre-upgrade state has been written to: %s\n",
flags.pf.PrefixPrintablePath(constants.ClusterIDsFilename),
flags.pf.PrefixPrintablePath(constants.StateFilename),
flags.pf.PrefixPrintablePath(filepath.Join(upgradeDir, constants.TerraformUpgradeBackupDir)),
)
} else {
u.log.Debugf("No Terraform diff detected")
}
u.log.Debugf("No Terraform diff detected")
infraState, err := u.clusterShower.ShowInfrastructure(cmd.Context(), conf.GetProvider())
if err != nil {
return infraState, fmt.Errorf("getting Terraform output: %w", err)
}
state := state.NewState(infraState)
// TODO(elchead): AB#3424 move this to updateClusterIDFile and correctly handle existing state when writing state
if err := u.fileHandler.WriteYAML(constants.StateFilename, state, file.OptOverwrite); err != nil {
return infraState, fmt.Errorf("writing state file: %w", err)
}
return infraState, nil
}
@ -408,12 +432,12 @@ func (u *upgradeApplyCmd) confirmAndUpgradeAttestationConfig(
if err := u.kubeUpgrader.ApplyJoinConfig(cmd.Context(), newConfig, measurementSalt); err != nil {
return fmt.Errorf("updating attestation config: %w", err)
}
cmd.Println("Successfully update the cluster's attestation config")
cmd.Println("Successfully updated the cluster's attestation config")
return nil
}
func (u *upgradeApplyCmd) handleServiceUpgrade(
cmd *cobra.Command, conf *config.Config, idFile clusterid.File, infra state.Infrastructure,
cmd *cobra.Command, conf *config.Config, stateFile *state.State,
upgradeDir string, flags upgradeApplyFlags,
) error {
var secret uri.MasterSecret
@ -432,8 +456,7 @@ func (u *upgradeApplyCmd) handleServiceUpgrade(
prepareApply := func(allowDestructive bool) (helm.Applier, bool, error) {
options.AllowDestructive = allowDestructive
executor, includesUpgrades, err := u.helmApplier.PrepareApply(conf, idFile, options,
infra, serviceAccURI, secret)
executor, includesUpgrades, err := u.helmApplier.PrepareApply(conf, stateFile, options, serviceAccURI, secret)
var upgradeErr *compatibility.InvalidUpgradeError
switch {
case errors.As(err, &upgradeErr):
@ -587,29 +610,6 @@ func parseUpgradeApplyFlags(cmd *cobra.Command) (upgradeApplyFlags, error) {
}, nil
}
func updateClusterIDFile(infraState state.Infrastructure, fileHandler file.Handler) error {
newIDFile := clusterid.File{
InitSecret: []byte(infraState.InitSecret),
IP: infraState.ClusterEndpoint,
APIServerCertSANs: infraState.APIServerCertSANs,
UID: infraState.UID,
}
if infraState.Azure != nil {
newIDFile.AttestationURL = infraState.Azure.AttestationURL
}
idFile := &clusterid.File{}
if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, idFile); err != nil {
return fmt.Errorf("reading %s: %w", constants.ClusterIDsFilename, err)
}
if err := fileHandler.WriteJSON(constants.ClusterIDsFilename, idFile.Merge(newIDFile), file.OptOverwrite); err != nil {
return fmt.Errorf("writing %s: %w", constants.ClusterIDsFilename, err)
}
return nil
}
type upgradeApplyFlags struct {
pf pathprefix.PathPrefixer
yes bool

View File

@ -34,10 +34,37 @@ import (
)
func TestUpgradeApply(t *testing.T) {
defaultState := state.New().
SetInfrastructure(state.Infrastructure{
APIServerCertSANs: []string{},
UID: "uid",
Name: "kubernetes-uid", // default test cfg uses "kubernetes" prefix
InitSecret: []byte{0x42},
}).
SetClusterValues(state.ClusterValues{MeasurementSalt: []byte{0x41}})
defaultIDFile := clusterid.File{
MeasurementSalt: []byte{0x41},
UID: "uid",
InitSecret: []byte{0x42},
}
fsWithIDFile := func() file.Handler {
fh := file.NewHandler(afero.NewMemMapFs())
require.NoError(t, fh.WriteJSON(constants.ClusterIDsFilename, defaultIDFile))
return fh
}
fsWithStateFile := func() file.Handler {
fh := file.NewHandler(afero.NewMemMapFs())
require.NoError(t, fh.WriteYAML(constants.StateFilename, defaultState))
return fh
}
testCases := map[string]struct {
helmUpgrader helmApplier
kubeUpgrader *stubKubernetesUpgrader
fh func() file.Handler
fhAssertions func(require *require.Assertions, assert *assert.Assertions, fh file.Handler)
terraformUpgrader clusterUpgrader
infrastructureShower *stubShowInfrastructure
wantErr bool
customK8sVersion string
flags upgradeApplyFlags
@ -48,6 +75,43 @@ func TestUpgradeApply(t *testing.T) {
helmUpgrader: stubApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
fhAssertions: func(require *require.Assertions, assert *assert.Assertions, fh file.Handler) {
gotState, err := state.ReadFromFile(fh, constants.StateFilename)
require.NoError(err)
assert.Equal("v1", gotState.Version)
assert.Equal(defaultState, gotState)
},
},
"fall back to id file": {
kubeUpgrader: &stubKubernetesUpgrader{currentConfig: config.DefaultForAzureSEVSNP()},
helmUpgrader: stubApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithIDFile,
fhAssertions: func(require *require.Assertions, assert *assert.Assertions, fh file.Handler) {
gotState, err := state.ReadFromFile(fh, constants.StateFilename)
require.NoError(err)
assert.Equal("v1", gotState.Version)
assert.Equal(defaultState, gotState)
var oldIDFile clusterid.File
err = fh.ReadJSON(constants.ClusterIDsFilename+".old", &oldIDFile)
assert.NoError(err)
assert.Equal(defaultIDFile, oldIDFile)
},
},
"id file and state file do not exist": {
kubeUpgrader: &stubKubernetesUpgrader{currentConfig: config.DefaultForAzureSEVSNP()},
helmUpgrader: stubApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: func() file.Handler {
return file.NewHandler(afero.NewMemMapFs())
},
wantErr: true,
},
"nodeVersion some error": {
kubeUpgrader: &stubKubernetesUpgrader{
@ -58,6 +122,8 @@ func TestUpgradeApply(t *testing.T) {
terraformUpgrader: &stubTerraformUpgrader{},
wantErr: true,
flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
},
"nodeVersion in progress error": {
kubeUpgrader: &stubKubernetesUpgrader{
@ -67,6 +133,8 @@ func TestUpgradeApply(t *testing.T) {
helmUpgrader: stubApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
},
"helm other error": {
kubeUpgrader: &stubKubernetesUpgrader{
@ -76,6 +144,8 @@ func TestUpgradeApply(t *testing.T) {
terraformUpgrader: &stubTerraformUpgrader{},
wantErr: true,
flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
},
"abort": {
kubeUpgrader: &stubKubernetesUpgrader{
@ -85,6 +155,8 @@ func TestUpgradeApply(t *testing.T) {
terraformUpgrader: &stubTerraformUpgrader{terraformDiff: true},
wantErr: true,
stdin: "no\n",
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
},
"abort, restore terraform err": {
kubeUpgrader: &stubKubernetesUpgrader{
@ -94,6 +166,8 @@ func TestUpgradeApply(t *testing.T) {
terraformUpgrader: &stubTerraformUpgrader{terraformDiff: true, rollbackWorkspaceErr: assert.AnError},
wantErr: true,
stdin: "no\n",
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
},
"plan terraform error": {
kubeUpgrader: &stubKubernetesUpgrader{
@ -103,6 +177,8 @@ func TestUpgradeApply(t *testing.T) {
terraformUpgrader: &stubTerraformUpgrader{planTerraformErr: assert.AnError},
wantErr: true,
flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
},
"apply terraform error": {
kubeUpgrader: &stubKubernetesUpgrader{
@ -115,6 +191,8 @@ func TestUpgradeApply(t *testing.T) {
},
wantErr: true,
flags: upgradeApplyFlags{yes: true},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
},
"outdated K8s patch version": {
kubeUpgrader: &stubKubernetesUpgrader{
@ -128,7 +206,8 @@ func TestUpgradeApply(t *testing.T) {
return semver.NewFromInt(v.Major(), v.Minor(), v.Patch()-1, "").String()
}(),
flags: upgradeApplyFlags{yes: true},
wantErr: false,
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
},
"outdated K8s version": {
kubeUpgrader: &stubKubernetesUpgrader{
@ -139,6 +218,8 @@ func TestUpgradeApply(t *testing.T) {
customK8sVersion: "v1.20.0",
flags: upgradeApplyFlags{yes: true},
wantErr: true,
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
},
"skip all upgrade phases": {
kubeUpgrader: &stubKubernetesUpgrader{
@ -150,6 +231,24 @@ func TestUpgradeApply(t *testing.T) {
skipPhases: []skipPhase{skipInfrastructurePhase, skipHelmPhase, skipK8sPhase, skipImagePhase},
yes: true,
},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
},
"show state err": {
kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
},
helmUpgrader: &stubApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
flags: upgradeApplyFlags{
skipPhases: []skipPhase{skipInfrastructurePhase},
yes: true,
},
infrastructureShower: &stubShowInfrastructure{
showInfraErr: assert.AnError,
},
wantErr: true,
fh: fsWithStateFile,
},
"skip all phases except node upgrade": {
kubeUpgrader: &stubKubernetesUpgrader{
@ -161,6 +260,8 @@ func TestUpgradeApply(t *testing.T) {
skipPhases: []skipPhase{skipInfrastructurePhase, skipHelmPhase, skipK8sPhase},
yes: true,
},
infrastructureShower: &stubShowInfrastructure{},
fh: fsWithStateFile,
},
}
@ -171,15 +272,13 @@ func TestUpgradeApply(t *testing.T) {
cmd := newUpgradeApplyCmd()
cmd.SetIn(bytes.NewBufferString(tc.stdin))
handler := file.NewHandler(afero.NewMemMapFs())
cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), cloudprovider.Azure)
if tc.customK8sVersion != "" {
cfg.KubernetesVersion = versions.ValidK8sVersion(tc.customK8sVersion)
}
require.NoError(handler.WriteYAML(constants.ConfigFilename, cfg))
require.NoError(handler.WriteJSON(constants.ClusterIDsFilename, clusterid.File{MeasurementSalt: []byte("measurementSalt")}))
require.NoError(handler.WriteJSON(constants.MasterSecretFilename, uri.MasterSecret{}))
fh := tc.fh()
require.NoError(fh.WriteYAML(constants.ConfigFilename, cfg))
require.NoError(fh.WriteJSON(constants.MasterSecretFilename, uri.MasterSecret{}))
upgrader := upgradeApplyCmd{
kubeUpgrader: tc.kubeUpgrader,
@ -187,8 +286,8 @@ func TestUpgradeApply(t *testing.T) {
clusterUpgrader: tc.terraformUpgrader,
log: logger.NewTest(t),
configFetcher: stubAttestationFetcher{},
clusterShower: &stubShowInfrastructure{},
fileHandler: handler,
clusterShower: tc.infrastructureShower,
fileHandler: fh,
}
err := upgrader.upgradeApply(cmd, "test", tc.flags)
@ -200,14 +299,9 @@ func TestUpgradeApply(t *testing.T) {
assert.Equal(!tc.flags.skipPhases.contains(skipImagePhase), tc.kubeUpgrader.calledNodeUpgrade,
"incorrect node upgrade skipping behavior")
var gotState state.State
expectedState := state.Infrastructure{
APIServerCertSANs: []string{},
Azure: &state.Azure{},
if tc.fhAssertions != nil {
tc.fhAssertions(require, assert, fh)
}
require.NoError(handler.ReadYAML(constants.StateFilename, &gotState))
assert.Equal("v1", gotState.Version)
assert.Equal(expectedState, gotState.Infrastructure)
})
}
}
@ -308,9 +402,17 @@ type mockApplier struct {
mock.Mock
}
func (m *mockApplier) PrepareApply(cfg *config.Config, clusterID clusterid.File,
helmOpts helm.Options, infraState state.Infrastructure, str string, masterSecret uri.MasterSecret,
func (m *mockApplier) PrepareApply(cfg *config.Config, stateFile *state.State,
helmOpts helm.Options, str string, masterSecret uri.MasterSecret,
) (helm.Applier, bool, error) {
args := m.Called(cfg, clusterID, helmOpts, infraState, str, masterSecret)
args := m.Called(cfg, stateFile, helmOpts, str, masterSecret)
return args.Get(0).(helm.Applier), args.Bool(1), args.Error(2)
}
type stubShowInfrastructure struct {
showInfraErr error
}
func (s *stubShowInfrastructure) ShowInfrastructure(context.Context, cloudprovider.Provider) (state.Infrastructure, error) {
return state.Infrastructure{}, s.showInfraErr
}

View File

@ -26,8 +26,8 @@ import (
tpmProto "github.com/google/go-tpm-tools/proto/tpm"
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
"github.com/edgelesssys/constellation/v2/internal/atls"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
@ -54,7 +54,7 @@ func NewVerifyCmd() *cobra.Command {
Use: "verify",
Short: "Verify the confidential properties of a Constellation cluster",
Long: "Verify the confidential properties of a Constellation cluster.\n" +
"If arguments aren't specified, values are read from `" + constants.ClusterIDsFilename + "`.",
"If arguments aren't specified, values are read from `" + constants.StateFilename + "`.",
Args: cobra.ExactArgs(0),
RunE: runVerify,
}
@ -204,27 +204,36 @@ func (c *verifyCmd) parseVerifyFlags(cmd *cobra.Command, fileHandler file.Handle
}
c.log.Debugf("Flag 'output' set to %t", output)
var idFile clusterid.File
if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil && !errors.Is(err, afero.ErrFileNotFound) {
return verifyFlags{}, fmt.Errorf("reading cluster ID file: %w", err)
// Get empty values from state file
stateFile, err := state.ReadFromFile(fileHandler, constants.StateFilename)
isFileNotFound := errors.Is(err, afero.ErrFileNotFound)
if isFileNotFound {
c.log.Debugf("State file %q not found, using empty state", pf.PrefixPrintablePath(constants.StateFilename))
stateFile = state.New() // fall back to an empty state to keep the previous behavior for a missing file
} else if err != nil {
return verifyFlags{}, fmt.Errorf("reading state file: %w", err)
}
// Get empty values from ID file
emptyEndpoint := endpoint == ""
emptyIDs := ownerID == "" && clusterID == ""
if emptyEndpoint || emptyIDs {
c.log.Debugf("Trying to supplement empty flag values from %q", pf.PrefixPrintablePath(constants.ClusterIDsFilename))
c.log.Debugf("Trying to supplement empty flag values from %q", pf.PrefixPrintablePath(constants.StateFilename))
if emptyEndpoint {
cmd.PrintErrf("Using endpoint from %q. Specify --node-endpoint to override this.\n", pf.PrefixPrintablePath(constants.ClusterIDsFilename))
endpoint = idFile.IP
cmd.PrintErrf("Using endpoint from %q. Specify --node-endpoint to override this.\n", pf.PrefixPrintablePath(constants.StateFilename))
endpoint = stateFile.Infrastructure.ClusterEndpoint
}
if emptyIDs {
cmd.PrintErrf("Using ID from %q. Specify --cluster-id to override this.\n", pf.PrefixPrintablePath(constants.ClusterIDsFilename))
ownerID = idFile.OwnerID
clusterID = idFile.ClusterID
cmd.PrintErrf("Using ID from %q. Specify --cluster-id to override this.\n", pf.PrefixPrintablePath(constants.StateFilename))
ownerID = stateFile.ClusterValues.OwnerID
clusterID = stateFile.ClusterValues.ClusterID
}
}
var attestationURL string
if stateFile.Infrastructure.Azure != nil {
attestationURL = stateFile.Infrastructure.Azure.AttestationURL
}
// Validate
if ownerID == "" && clusterID == "" {
return verifyFlags{}, errors.New("cluster-id not provided to verify the cluster")
@ -239,8 +248,8 @@ func (c *verifyCmd) parseVerifyFlags(cmd *cobra.Command, fileHandler file.Handle
pf: pf,
ownerID: ownerID,
clusterID: clusterID,
maaURL: idFile.AttestationURL,
output: output,
maaURL: attestationURL,
force: force,
}, nil
}

View File

@ -17,7 +17,7 @@ import (
"strings"
"testing"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/atls"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
@ -48,7 +48,7 @@ func TestVerify(t *testing.T) {
formatter *stubAttDocFormatter
nodeEndpointFlag string
clusterIDFlag string
idFile *clusterid.File
stateFile *state.State
wantEndpoint string
skipConfigCreation bool
wantErr bool
@ -84,11 +84,11 @@ func TestVerify(t *testing.T) {
formatter: &stubAttDocFormatter{},
wantErr: true,
},
"endpoint from id file": {
"endpoint from state file": {
provider: cloudprovider.GCP,
clusterIDFlag: zeroBase64,
protoClient: &stubVerifyClient{},
idFile: &clusterid.File{IP: "192.0.2.1"},
stateFile: &state.State{Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
wantEndpoint: "192.0.2.1:" + strconv.Itoa(constants.VerifyServiceNodePortGRPC),
formatter: &stubAttDocFormatter{},
},
@ -97,7 +97,7 @@ func TestVerify(t *testing.T) {
nodeEndpointFlag: "192.0.2.2:1234",
clusterIDFlag: zeroBase64,
protoClient: &stubVerifyClient{},
idFile: &clusterid.File{IP: "192.0.2.1"},
stateFile: &state.State{Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.1"}},
wantEndpoint: "192.0.2.2:1234",
formatter: &stubAttDocFormatter{},
},
@ -115,11 +115,11 @@ func TestVerify(t *testing.T) {
formatter: &stubAttDocFormatter{},
wantErr: true,
},
"use owner id from id file": {
"use owner id from state file": {
provider: cloudprovider.GCP,
nodeEndpointFlag: "192.0.2.1:1234",
protoClient: &stubVerifyClient{},
idFile: &clusterid.File{OwnerID: zeroBase64},
stateFile: &state.State{ClusterValues: state.ClusterValues{OwnerID: zeroBase64}},
wantEndpoint: "192.0.2.1:1234",
formatter: &stubAttDocFormatter{},
},
@ -180,8 +180,8 @@ func TestVerify(t *testing.T) {
cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), tc.provider)
require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, cfg))
}
if tc.idFile != nil {
require.NoError(fileHandler.WriteJSON(constants.ClusterIDsFilename, tc.idFile, file.OptNone))
if tc.stateFile != nil {
require.NoError(tc.stateFile.WriteToFile(fileHandler, constants.StateFilename))
}
v := &verifyCmd{log: logger.NewTest(t)}

View File

@ -416,7 +416,6 @@ go_library(
importpath = "github.com/edgelesssys/constellation/v2/cli/internal/helm",
visibility = ["//cli:__subpackages__"],
deps = [
"//cli/internal/clusterid",
"//cli/internal/helm/imageversion",
"//cli/internal/state",
"//internal/cloud/azureshared",
@ -458,7 +457,6 @@ go_test(
data = glob(["testdata/**"]),
embed = [":helm"],
deps = [
"//cli/internal/clusterid",
"//cli/internal/state",
"//internal/attestation/measurements",
"//internal/cloud/azureshared",

View File

@ -32,7 +32,6 @@ import (
"context"
"fmt"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
@ -87,10 +86,10 @@ type Options struct {
// PrepareApply loads the charts and returns the executor to apply them.
// TODO(elchead): remove validK8sVersion by putting ValidK8sVersion into config.Config, see AB#3374.
func (h Client) PrepareApply(
conf *config.Config, idFile clusterid.File,
flags Options, infra state.Infrastructure, serviceAccURI string, masterSecret uri.MasterSecret,
conf *config.Config, stateFile *state.State,
flags Options, serviceAccURI string, masterSecret uri.MasterSecret,
) (Applier, bool, error) {
releases, err := h.loadReleases(conf, masterSecret, idFile, flags, infra, serviceAccURI)
releases, err := h.loadReleases(conf, masterSecret, stateFile, flags, serviceAccURI)
if err != nil {
return nil, false, fmt.Errorf("loading Helm releases: %w", err)
}
@ -101,12 +100,11 @@ func (h Client) PrepareApply(
func (h Client) loadReleases(
conf *config.Config, secret uri.MasterSecret,
idFile clusterid.File, flags Options, infra state.Infrastructure, serviceAccURI string,
stateFile *state.State, flags Options, serviceAccURI string,
) ([]Release, error) {
helmLoader := newLoader(conf, idFile, h.cliVersion)
helmLoader := newLoader(conf, stateFile, h.cliVersion)
h.log.Debugf("Created new Helm loader")
return helmLoader.loadReleases(flags.Conformance, flags.HelmWaitMode, secret,
serviceAccURI, infra)
return helmLoader.loadReleases(flags.Conformance, flags.HelmWaitMode, secret, serviceAccURI)
}
// Applier runs the Helm actions.

View File

@ -10,7 +10,6 @@ import (
"errors"
"testing"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/compatibility"
@ -208,8 +207,10 @@ func TestHelmApply(t *testing.T) {
options.AllowDestructive = tc.allowDestructive
ex, includesUpgrade, err := sut.PrepareApply(cfg,
clusterid.File{UID: "testuid", MeasurementSalt: []byte("measurementSalt")}, options,
fakeInfraOutput(csp), fakeServiceAccURI(csp),
state.New().
SetInfrastructure(state.Infrastructure{UID: "testuid"}).
SetClusterValues(state.ClusterValues{MeasurementSalt: []byte{0x41}}),
options, fakeServiceAccURI(csp),
uri.MasterSecret{Key: []byte("secret"), Salt: []byte("masterSalt")})
var upgradeErr *compatibility.InvalidUpgradeError
if tc.expectError {
@ -225,17 +226,6 @@ func TestHelmApply(t *testing.T) {
}
}
func fakeInfraOutput(csp cloudprovider.Provider) state.Infrastructure {
switch csp {
case cloudprovider.AWS:
return state.Infrastructure{}
case cloudprovider.GCP:
return state.Infrastructure{GCP: &state.GCP{}}
default:
panic("invalid csp")
}
}
func getActionReleaseNames(actions []applyAction) []string {
releaseActionNames := []string{}
for _, action := range actions {

View File

@ -19,7 +19,6 @@ import (
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chart/loader"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/helm/imageversion"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
@ -72,12 +71,12 @@ type chartLoader struct {
constellationOperatorImage string
nodeMaintenanceOperatorImage string
clusterName string
idFile clusterid.File
stateFile *state.State
cliVersion semver.Semver
}
// newLoader creates a new ChartLoader.
func newLoader(config *config.Config, idFile clusterid.File, cliVersion semver.Semver) *chartLoader {
func newLoader(config *config.Config, stateFile *state.State, cliVersion semver.Semver) *chartLoader {
// TODO(malt3): Allow overriding container image registry + prefix for all images
// (e.g. for air-gapped environments).
var ccmImage, cnmImage string
@ -97,7 +96,7 @@ func newLoader(config *config.Config, idFile clusterid.File, cliVersion semver.S
return &chartLoader{
cliVersion: cliVersion,
csp: csp,
idFile: idFile,
stateFile: stateFile,
ccmImage: ccmImage,
azureCNMImage: cnmImage,
config: config,
@ -120,13 +119,13 @@ type releaseApplyOrder []Release
// loadReleases loads the embedded helm charts and returns them as a HelmReleases object.
func (i *chartLoader) loadReleases(conformanceMode bool, helmWaitMode WaitMode, masterSecret uri.MasterSecret,
serviceAccURI string, infra state.Infrastructure,
serviceAccURI string,
) (releaseApplyOrder, error) {
ciliumRelease, err := i.loadRelease(ciliumInfo, helmWaitMode)
if err != nil {
return nil, fmt.Errorf("loading cilium: %w", err)
}
ciliumVals := extraCiliumValues(i.config.GetProvider(), conformanceMode, infra)
ciliumVals := extraCiliumValues(i.config.GetProvider(), conformanceMode, i.stateFile.Infrastructure)
ciliumRelease.Values = mergeMaps(ciliumRelease.Values, ciliumVals)
certManagerRelease, err := i.loadRelease(certManagerInfo, helmWaitMode)
@ -138,14 +137,14 @@ func (i *chartLoader) loadReleases(conformanceMode bool, helmWaitMode WaitMode,
if err != nil {
return nil, fmt.Errorf("loading operators: %w", err)
}
operatorRelease.Values = mergeMaps(operatorRelease.Values, extraOperatorValues(i.idFile.UID))
operatorRelease.Values = mergeMaps(operatorRelease.Values, extraOperatorValues(i.stateFile.Infrastructure.UID))
conServicesRelease, err := i.loadRelease(constellationServicesInfo, helmWaitMode)
if err != nil {
return nil, fmt.Errorf("loading constellation-services: %w", err)
}
svcVals, err := extraConstellationServicesValues(i.config, masterSecret, i.idFile.UID, serviceAccURI, infra)
svcVals, err := extraConstellationServicesValues(i.config, masterSecret, serviceAccURI, i.stateFile.Infrastructure)
if err != nil {
return nil, fmt.Errorf("extending constellation-services values: %w", err)
}
@ -216,7 +215,7 @@ func (i *chartLoader) loadRelease(info chartInfo, helmWaitMode WaitMode) (Releas
func (i *chartLoader) loadAWSLBControllerValues() map[string]any {
return map[string]any{
"clusterName": clusterid.GetClusterName(i.config, i.idFile),
"clusterName": i.stateFile.Infrastructure.Name,
"tolerations": controlPlaneTolerations,
"nodeSelector": controlPlaneNodeSelector,
}

View File

@ -22,7 +22,6 @@ import (
"helm.sh/helm/v3/pkg/chartutil"
"helm.sh/helm/v3/pkg/engine"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/state"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/cloud/azureshared"
@ -66,12 +65,23 @@ func TestLoadReleases(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
config := &config.Config{Provider: config.ProviderConfig{GCP: &config.GCPConfig{}}}
chartLoader := newLoader(config, clusterid.File{UID: "testuid", MeasurementSalt: []byte("measurementSalt")},
semver.NewFromInt(2, 10, 0, ""))
chartLoader := newLoader(
config,
state.New().
SetInfrastructure(state.Infrastructure{
GCP: &state.GCP{
ProjectID: "test-project-id",
IPCidrNode: "test-node-cidr",
IPCidrPod: "test-pod-cidr",
},
}).
SetClusterValues(state.ClusterValues{MeasurementSalt: []byte{0x41}}),
semver.NewFromInt(2, 10, 0, ""),
)
helmReleases, err := chartLoader.loadReleases(
true, WaitModeAtomic,
uri.MasterSecret{Key: []byte("secret"), Salt: []byte("masterSalt")},
fakeServiceAccURI(cloudprovider.GCP), state.Infrastructure{GCP: &state.GCP{}},
fakeServiceAccURI(cloudprovider.GCP),
)
require.NoError(err)
for _, release := range helmReleases {
@ -85,7 +95,7 @@ func TestLoadAWSLoadBalancerValues(t *testing.T) {
sut := chartLoader{
config: &config.Config{Name: "testCluster"},
clusterName: "testCluster",
idFile: clusterid.File{UID: "testuid"},
stateFile: state.New().SetInfrastructure(state.Infrastructure{UID: "testuid", Name: "testCluster-testuid"}),
}
val := sut.loadAWSLBControllerValues()
assert.Equal(t, "testCluster-testuid", val["clusterName"])
@ -174,8 +184,8 @@ func TestConstellationServices(t *testing.T) {
tc.config, uri.MasterSecret{
Key: []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
Salt: []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
},
"uid", serviceAccURI, state.Infrastructure{
}, serviceAccURI, state.Infrastructure{
UID: "uid",
Azure: &state.Azure{},
GCP: &state.GCP{},
})

View File

@ -54,7 +54,7 @@ func extraCiliumValues(provider cloudprovider.Provider, conformanceMode bool, ou
// extraConstellationServicesValues extends the given values map by some values depending on user input.
// Values set inside this function are only applied during init, not during upgrade.
func extraConstellationServicesValues(
cfg *config.Config, masterSecret uri.MasterSecret, uid, serviceAccURI string, output state.Infrastructure,
cfg *config.Config, masterSecret uri.MasterSecret, serviceAccURI string, output state.Infrastructure,
) (map[string]any, error) {
extraVals := map[string]any{}
extraVals["join-service"] = map[string]any{
@ -102,7 +102,7 @@ func extraConstellationServicesValues(
extraVals["ccm"] = map[string]any{
"GCP": map[string]any{
"projectID": output.GCP.ProjectID,
"uid": uid,
"uid": output.UID,
"secretData": string(rawKey),
"subnetworkPodCIDR": output.GCP.IPCidrPod,
},

View File

@ -1,8 +1,30 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("//bazel/go:go_test.bzl", "go_test")
go_library(
name = "state",
srcs = ["state.go"],
importpath = "github.com/edgelesssys/constellation/v2/cli/internal/state",
visibility = ["//cli:__subpackages__"],
deps = [
"//cli/internal/clusterid",
"//internal/config",
"//internal/file",
"@cat_dario_mergo//:mergo",
],
)
go_test(
name = "state_test",
srcs = ["state_test.go"],
embed = [":state"],
deps = [
"//cli/internal/clusterid",
"//internal/config",
"//internal/constants",
"//internal/file",
"@com_github_spf13_afero//:afero",
"@com_github_stretchr_testify//assert",
"@in_gopkg_yaml_v3//:yaml_v3",
],
)

View File

@ -7,31 +7,115 @@ SPDX-License-Identifier: AGPL-3.0-only
// Package state defines the structure of the Constellation state file.
package state
import (
"fmt"
"dario.cat/mergo"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/file"
)
const (
// Version1 is the first version of the state file.
Version1 = "v1"
)
// ReadFromFile reads the state file at the given path and returns the state.
func ReadFromFile(fileHandler file.Handler, path string) (*State, error) {
state := &State{}
if err := fileHandler.ReadYAML(path, &state); err != nil {
return nil, fmt.Errorf("reading state file: %w", err)
}
return state, nil
}
// State describes the entire state of a Constellation cluster.
type State struct {
Version string `yaml:"version"`
Infrastructure Infrastructure `yaml:"infrastructure"`
ClusterValues ClusterValues `yaml:"clusterValues"`
}
// NewState creates a new state with the given infrastructure.
func NewState(Infrastructure Infrastructure) State {
return State{
// New creates a new cluster state (file).
func New() *State {
return &State{
Version: Version1,
Infrastructure: Infrastructure,
}
}
// NewFromIDFile creates a new cluster state file from the given ID file and config.
func NewFromIDFile(idFile clusterid.File, cfg *config.Config) *State {
s := New().
SetClusterValues(ClusterValues{
OwnerID: idFile.OwnerID,
ClusterID: idFile.ClusterID,
MeasurementSalt: idFile.MeasurementSalt,
}).
SetInfrastructure(Infrastructure{
UID: idFile.UID,
ClusterEndpoint: idFile.IP,
APIServerCertSANs: idFile.APIServerCertSANs,
InitSecret: idFile.InitSecret,
Name: clusterid.GetClusterName(cfg, idFile),
})
if idFile.AttestationURL != "" {
s.Infrastructure.Azure = &Azure{
AttestationURL: idFile.AttestationURL,
}
}
return s
}
// SetInfrastructure sets the infrastructure state.
func (s *State) SetInfrastructure(infrastructure Infrastructure) *State {
s.Infrastructure = infrastructure
return s
}
// SetClusterValues sets the cluster values.
func (s *State) SetClusterValues(clusterValues ClusterValues) *State {
s.ClusterValues = clusterValues
return s
}
// WriteToFile writes the state to the given path, overwriting any existing file.
func (s *State) WriteToFile(fileHandler file.Handler, path string) error {
if err := fileHandler.WriteYAML(path, s, file.OptMkdirAll, file.OptOverwrite); err != nil {
return fmt.Errorf("writing state file: %w", err)
}
return nil
}
// Merge merges the state information from other into the current state.
// If a field is set in both states, the value of the other state is used.
func (s *State) Merge(other *State) (*State, error) {
if err := mergo.Merge(s, other, mergo.WithOverride); err != nil {
return nil, fmt.Errorf("merging state file: %w", err)
}
return s, nil
}
// ClusterValues describes the (Kubernetes) cluster state, set during initialization of the cluster.
type ClusterValues struct {
// ClusterID is the unique identifier of the cluster.
ClusterID string `yaml:"clusterID"`
// OwnerID is the unique identifier of the owner of the cluster.
OwnerID string `yaml:"ownerID"`
// MeasurementSalt is the salt generated during cluster init.
MeasurementSalt []byte `yaml:"measurementSalt"`
}
// Infrastructure describes the state related to the cloud resources of the cluster.
type Infrastructure struct {
UID string `yaml:"uid"`
ClusterEndpoint string `yaml:"clusterEndpoint"`
InitSecret string `yaml:"initSecret"`
InitSecret []byte `yaml:"initSecret"`
APIServerCertSANs []string `yaml:"apiServerCertSANs"`
// Name is the name of the cluster.
Name string `yaml:"name"`
Azure *Azure `yaml:"azure,omitempty"`
GCP *GCP `yaml:"gcp,omitempty"`
}
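To make the new API easier to follow, here is a minimal sketch of the intended lifecycle: write the state after provisioning, then read, merge, and rewrite it later. It assumes it is compiled inside the CLI module (the `state` package is internal), and all concrete values are made up.

```go
package main

import (
	"fmt"

	"github.com/edgelesssys/constellation/v2/cli/internal/state"
	"github.com/edgelesssys/constellation/v2/internal/constants"
	"github.com/edgelesssys/constellation/v2/internal/file"
	"github.com/spf13/afero"
)

// writeAndMergeState shows the round trip: New -> WriteToFile -> ReadFromFile -> Merge.
func writeAndMergeState() error {
	// In the CLI this is the workspace file handler; an in-memory fs keeps the sketch self-contained.
	fh := file.NewHandler(afero.NewMemMapFs())

	// State as it could look right after infrastructure creation.
	st := state.New().SetInfrastructure(state.Infrastructure{
		UID:             "test-uid",
		Name:            "constell-test-uid",
		ClusterEndpoint: "192.0.2.1",
	})
	if err := st.WriteToFile(fh, constants.StateFilename); err != nil {
		return err
	}

	// Later, read it back and merge in values learned during init.
	// Fields set in the argument take precedence over existing ones.
	read, err := state.ReadFromFile(fh, constants.StateFilename)
	if err != nil {
		return err
	}
	merged, err := read.Merge(state.New().SetClusterValues(state.ClusterValues{
		ClusterID: "test-cluster-id",
	}))
	if err != nil {
		return err
	}
	if err := merged.WriteToFile(fh, constants.StateFilename); err != nil {
		return err
	}
	fmt.Println(merged.Infrastructure.Name, merged.ClusterValues.ClusterID)
	return nil
}

func main() {
	if err := writeAndMergeState(); err != nil {
		panic(err)
	}
}
```

The merge semantics come from `mergo.WithOverride`, which the `TestMerge` cases further below exercise, including the nested `Azure` pointer.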

View File

@ -0,0 +1,392 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package state
import (
"testing"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
)
var defaultState = &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "123",
ClusterEndpoint: "test-cluster-endpoint",
InitSecret: []byte{0x41},
APIServerCertSANs: []string{
"api-server-cert-san-test",
"api-server-cert-san-test-2",
},
Azure: &Azure{
ResourceGroup: "test-rg",
SubscriptionID: "test-sub",
NetworkSecurityGroupName: "test-nsg",
LoadBalancerName: "test-lb",
UserAssignedIdentity: "test-uami",
AttestationURL: "test-maaUrl",
},
GCP: &GCP{
ProjectID: "test-project",
IPCidrNode: "test-cidr-node",
IPCidrPod: "test-cidr-pod",
},
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
OwnerID: "test-owner-id",
MeasurementSalt: []byte{0x41},
},
}
func TestWriteToFile(t *testing.T) {
prepareFs := func(existingFiles ...string) file.Handler {
fs := afero.NewMemMapFs()
fh := file.NewHandler(fs)
for _, name := range existingFiles {
if err := fh.Write(name, []byte{0x41}); err != nil {
t.Fatalf("failed to create file %s: %v", name, err)
}
}
return fh
}
testCases := map[string]struct {
state *State
fh file.Handler
wantErr bool
}{
"success": {
state: defaultState,
fh: prepareFs(),
},
"overwrite": {
state: defaultState,
fh: prepareFs(constants.StateFilename),
},
"empty state": {
state: &State{},
fh: prepareFs(),
},
"rofs": {
state: defaultState,
fh: file.NewHandler(afero.NewReadOnlyFs(afero.NewMemMapFs())),
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
err := tc.state.WriteToFile(tc.fh, constants.StateFilename)
if tc.wantErr {
assert.Error(err)
} else {
assert.NoError(err)
assert.Equal(mustMarshalYaml(t, tc.state), mustReadFromFile(t, tc.fh))
}
})
}
}
func TestReadFromFile(t *testing.T) {
prepareFs := func(existingFiles map[string][]byte) file.Handler {
fs := afero.NewMemMapFs()
fh := file.NewHandler(fs)
for name, content := range existingFiles {
if err := fh.Write(name, content); err != nil {
t.Fatalf("failed to create file %s: %v", name, err)
}
}
return fh
}
testCases := map[string]struct {
existingFiles map[string][]byte
wantErr bool
}{
"success": {
existingFiles: map[string][]byte{
constants.StateFilename: mustMarshalYaml(t, defaultState),
},
},
"no state file present": {
existingFiles: map[string][]byte{},
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
fh := prepareFs(tc.existingFiles)
state, err := ReadFromFile(fh, constants.StateFilename)
if tc.wantErr {
assert.Error(err)
} else {
assert.NoError(err)
assert.Equal(tc.existingFiles[constants.StateFilename], mustMarshalYaml(t, state))
}
})
}
}
func mustMarshalYaml(t *testing.T, v any) []byte {
t.Helper()
b, err := yaml.Marshal(v)
if err != nil {
t.Fatalf("failed to marshal yaml: %v", err)
}
return b
}
func mustReadFromFile(t *testing.T, fh file.Handler) []byte {
t.Helper()
b, err := fh.Read(constants.StateFilename)
if err != nil {
t.Fatalf("failed to read file: %v", err)
}
return b
}
func TestMerge(t *testing.T) {
testCases := map[string]struct {
state *State
other *State
expected *State
wantErr bool
}{
"success": {
state: &State{
Infrastructure: Infrastructure{
ClusterEndpoint: "test-cluster-endpoint",
UID: "123",
},
},
other: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
expected: &State{
Version: "v1",
Infrastructure: Infrastructure{
ClusterEndpoint: "test-cluster-endpoint",
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
},
"empty state": {
state: &State{},
other: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
expected: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
},
"empty other": {
state: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
other: &State{},
expected: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
},
"empty state and other": {
state: &State{},
other: &State{},
expected: &State{},
},
"identical": {
state: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
other: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
expected: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
},
"nested pointer": {
state: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "123",
Azure: &Azure{
AttestationURL: "test-maaUrl",
},
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
other: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
Azure: &Azure{
AttestationURL: "test-maaUrl-2",
},
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
expected: &State{
Version: "v1",
Infrastructure: Infrastructure{
UID: "456",
Azure: &Azure{
AttestationURL: "test-maaUrl-2",
},
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
_, err := tc.state.Merge(tc.other)
if tc.wantErr {
assert.Error(err)
} else {
assert.NoError(err)
assert.Equal(tc.expected, tc.state)
}
})
}
}
func TestNewFromIDFile(t *testing.T) {
testCases := map[string]struct {
idFile clusterid.File
cfg *config.Config
expected *State
}{
"success": {
idFile: clusterid.File{
ClusterID: "test-cluster-id",
UID: "test-uid",
},
cfg: config.Default(),
expected: &State{
Version: Version1,
Infrastructure: Infrastructure{
UID: "test-uid",
Name: "constell-test-uid",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
},
"empty id file": {
idFile: clusterid.File{},
cfg: config.Default(),
expected: &State{Version: Version1, Infrastructure: Infrastructure{Name: "constell-"}},
},
"nested pointer": {
idFile: clusterid.File{
ClusterID: "test-cluster-id",
UID: "test-uid",
AttestationURL: "test-maaUrl",
},
cfg: config.Default(),
expected: &State{
Version: Version1,
Infrastructure: Infrastructure{
UID: "test-uid",
Azure: &Azure{
AttestationURL: "test-maaUrl",
},
Name: "constell-test-uid",
},
ClusterValues: ClusterValues{
ClusterID: "test-cluster-id",
},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
state := NewFromIDFile(tc.idFile, tc.cfg)
assert.Equal(tc.expected, state)
})
}
}

View File

@ -221,11 +221,21 @@ func (c *Client) ShowInfrastructure(ctx context.Context, provider cloudprovider.
return state.Infrastructure{}, errors.New("invalid type in uid output: not a string")
}
nameOutput, ok := tfState.Values.Outputs["name"]
if !ok {
return state.Infrastructure{}, errors.New("no name output found")
}
name, ok := nameOutput.Value.(string)
if !ok {
return state.Infrastructure{}, errors.New("invalid type in name output: not a string")
}
res := state.Infrastructure{
ClusterEndpoint: ip,
APIServerCertSANs: apiServerCertSANs,
InitSecret: secret,
InitSecret: []byte(secret),
UID: uid,
Name: name,
}
switch provider {

View File

@ -14,3 +14,7 @@ output "initSecret" {
value = random_password.initSecret.result
sensitive = true
}
output "name" {
value = local.name
}

View File

@ -39,3 +39,7 @@ output "resource_group" {
output "subscription_id" {
value = data.azurerm_subscription.current.subscription_id
}
output "name" {
value = local.name
}

View File

@ -30,3 +30,7 @@ output "ip_cidr_nodes" {
output "ip_cidr_pods" {
value = local.cidr_vpc_subnet_pods
}
output "name" {
value = local.name
}

View File

@ -14,3 +14,7 @@ output "initSecret" {
value = random_password.initSecret.result
sensitive = true
}
output "name" {
value = local.name
}

View File

@ -38,3 +38,7 @@ output "validate_constellation_cmdline" {
error_message = "constellation_cmdline must be set if constellation_boot_mode is 'direct-linux-boot'"
}
}
output "name" {
value = "${var.name}-qemu" // placeholder, as per "uid" output
}

View File

@ -223,6 +223,9 @@ func TestCreateCluster(t *testing.T) {
"api_server_cert_sans": {
Value: []any{"192.0.2.100"},
},
"name": {
Value: "constell-12345abc",
},
},
},
}
@ -262,6 +265,9 @@ func TestCreateCluster(t *testing.T) {
"loadbalancer_name": {
Value: "test_lb_name",
},
"name": {
Value: "constell-12345abc",
},
},
},
}
@ -398,6 +404,20 @@ func TestCreateCluster(t *testing.T) {
fs: afero.NewMemMapFs(),
wantErr: true,
},
"name has wrong type": {
pathBase: "terraform",
provider: cloudprovider.QEMU,
vars: qemuVars,
tf: &stubTerraform{
showState: &tfjson.State{
Values: &tfjson.StateValues{
Outputs: map[string]*tfjson.StateOutput{"name": {Value: 42}},
},
},
},
fs: afero.NewMemMapFs(),
wantErr: true,
},
"working attestation url": {
pathBase: "terraform",
provider: cloudprovider.Azure,
@ -457,7 +477,7 @@ func TestCreateCluster(t *testing.T) {
}
assert.NoError(err)
assert.Equal("192.0.2.100", infraState.ClusterEndpoint)
assert.Equal("initSecret", infraState.InitSecret)
assert.Equal([]byte("initSecret"), infraState.InitSecret)
assert.Equal("12345abc", infraState.UID)
if tc.provider == cloudprovider.Azure {
assert.Equal(tc.expectedAttestationURL, infraState.Azure.AttestationURL)

View File

@ -113,11 +113,11 @@ func deploy(cmd *cobra.Command, fileHandler file.Handler, constellationConfig *c
return err
}
if len(ips) == 0 {
var idFile clusterIDsFile
if err := fileHandler.ReadJSON(constants.ClusterIDsFilename, &idFile); err != nil {
return fmt.Errorf("reading cluster IDs file: %w", err)
var stateFile clusterStateFile
if err := fileHandler.ReadYAML(constants.StateFilename, &stateFile); err != nil {
return fmt.Errorf("reading cluster state file: %w", err)
}
ips = []string{idFile.IP}
ips = []string{stateFile.Infrastructure.ClusterEndpoint}
}
info, err := cmd.Flags().GetStringToString("info")
@ -285,8 +285,8 @@ type fileTransferer interface {
SetFiles(files []filetransfer.FileStat)
}
type clusterIDsFile struct {
ClusterID string
OwnerID string
IP string
type clusterStateFile struct {
Infrastructure struct {
ClusterEndpoint string `yaml:"clusterEndpoint"`
} `yaml:"infrastructure"`
}
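As an aside, this local struct only has to mirror the fields this helper actually reads; the YAML decoder ignores everything else. A small sketch of that idea (the YAML literal is made up):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// partialState mirrors the approach above: declare only the fields you need;
// unknown keys in the state file are simply ignored by the decoder.
type partialState struct {
	Infrastructure struct {
		ClusterEndpoint string `yaml:"clusterEndpoint"`
	} `yaml:"infrastructure"`
}

func main() {
	data := []byte("version: v1\ninfrastructure:\n  uid: test-uid\n  clusterEndpoint: 192.0.2.1\n")
	var s partialState
	if err := yaml.Unmarshal(data, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Infrastructure.ClusterEndpoint) // prints 192.0.2.1
}
```

Redeclaring the struct locally also avoids importing `cli/internal/state`, which is only visible to CLI subpackages (see the Bazel visibility above).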

View File

@ -8,7 +8,7 @@ The CLI is also used for updating your cluster.
## Workspaces
Each Constellation cluster has an associated *workspace*.
The workspace is where data such as the Constellation state, config, and ID files are stored.
The workspace is where data such as the Constellation state and config files are stored.
Each workspace is associated with a single cluster and configuration.
The CLI stores state in the local filesystem, making the current directory the active workspace.
Multiple clusters require multiple workspaces, hence multiple directories.
@ -21,14 +21,14 @@ To allow for fine-grained configuration of your cluster and cloud environment, C
Altogether, the following files are generated during the creation of a Constellation cluster and stored in the current workspace:
* a configuration file
* an ID file
* a state file
* a Base64-encoded master secret
* [Terraform artifacts](../reference/terraform.md), stored in subdirectories
* a Kubernetes `kubeconfig` file.
After the creation of your cluster, the CLI will provide you with a Kubernetes `kubeconfig` file.
After the initialization of your cluster, the CLI will provide you with a Kubernetes `kubeconfig` file.
This file grants you access to your Kubernetes cluster and configures the [kubectl](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) tool.
In addition, the cluster's [identifier](orchestration.md#post-installation-configuration) is returned and stored in a file called `constellation-id.json`
In addition, the cluster's [identifier](orchestration.md#post-installation-configuration) is returned and stored in the state file.
### Creation process details

View File

@ -380,7 +380,7 @@ Verify the confidential properties of a Constellation cluster
### Synopsis
Verify the confidential properties of a Constellation cluster.
If arguments aren't specified, values are read from `constellation-id.json`.
If arguments aren't specified, values are read from `constellation-state.yaml`.
```
constellation verify [flags]

View File

@ -65,14 +65,16 @@ terraform init
terraform apply
```
The Constellation [init step](#the-init-step) requires the already created `constellation-config.yaml` and the `constellation-id.json`.
Create the `constellation-id.json` using the output from the Terraform state and the `constellation-conf.yaml`:
The Constellation [init step](#the-init-step) requires the already created `constellation-conf.yaml` and the `constellation-state.yaml`.
Create the `constellation-state.yaml` using the output from the Terraform state and the `constellation-conf.yaml`:
```bash
CONSTELL_IP=$(terraform output ip)
CONSTELL_INIT_SECRET=$(terraform output initSecret | jq -r | tr -d '\n' | base64)
CONSTELL_CSP=$(cat constellation-conf.yaml | yq ".provider | keys | .[0]")
jq --null-input --arg cloudprovider "$CONSTELL_CSP" --arg ip "$CONSTELL_IP" --arg initsecret "$CONSTELL_INIT_SECRET" '{"cloudprovider":$cloudprovider,"ip":$ip,"initsecret":$initsecret}' > constellation-id.json
touch constellation-state.yaml
yq eval '.version ="v1"' --inplace constellation-state.yaml
yq eval ".infrastructure.initSecret =\"$CONSTELL_INIT_SECRET\"" --inplace constellation-state.yaml
yq eval ".infrastructure.clusterEndpoint =\"$CONSTELL_IP\"" --inplace constellation-state.yaml
```
</tabItem>

View File

@ -125,7 +125,7 @@ This means that you have to recover the node manually.
Recovering a cluster requires the following parameters:
* The `constellation-id.json` file in your working directory or the cluster's load balancer IP address
* The `constellation-state.yaml` file in your working directory or the cluster's endpoint
* The master secret of the cluster
A cluster can be recovered like this:

View File

@ -51,7 +51,7 @@ terraform destroy
Delete all files that are no longer needed:
```bash
rm constellation-id.json constellation-admin.conf
rm constellation-state.yaml constellation-admin.conf
```
Only the `constellation-mastersecret.json` and the configuration file remain.

View File

@ -78,7 +78,7 @@ From the attestation statement, the command verifies the following properties:
* The cluster is using the correct Confidential VM (CVM) type.
* Inside the CVMs, the correct node images are running. The node images are identified through the measurements obtained in the previous step.
* The unique ID of the cluster matches the one from your `constellation-id.json` file or passed in via `--cluster-id`.
* The unique ID of the cluster matches the one from your `constellation-state.yaml` file or passed in via `--cluster-id`.
Once the above properties are verified, you know that you are talking to the right Constellation cluster and it's in a good and trustworthy shape.

go.mod
View File

@ -45,6 +45,7 @@ require (
cloud.google.com/go/logging v1.7.0
cloud.google.com/go/secretmanager v1.11.1
cloud.google.com/go/storage v1.31.0
dario.cat/mergo v1.0.0
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0

go.sum
View File

@ -59,6 +59,8 @@ cloud.google.com/go/storage v1.31.0 h1:+S3LjjEN2zZ+L5hOwj4+1OkGCsLVe0NzpXKQ1pSdT
cloud.google.com/go/storage v1.31.0/go.mod h1:81ams1PrhW16L4kF7qg+4mTq7SRs5HsbDTM0bWvrwJ0=
code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c h1:5eeuG0BHx1+DHeT3AP+ISKZ2ht1UjGhm581ljqYpVeQ=
code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0=

View File

@ -57,6 +57,7 @@ require (
cloud.google.com/go/compute v1.20.1 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c // indirect
dario.cat/mergo v1.0.0 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 // indirect

View File

@ -47,6 +47,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c h1:5eeuG0BHx1+DHeT3AP+ISKZ2ht1UjGhm581ljqYpVeQ=
code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0=

View File

@ -232,3 +232,8 @@ func (h *Handler) CopyFile(src, dst string, opts ...Option) error {
return nil
}
// RenameFile renames a file, overwriting any existing file at the destination.
func (h *Handler) RenameFile(old, new string) error {
return h.fs.Rename(old, new)
}
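A hedged sketch of how this helper might be used to preserve the legacy id-file during migration; the `.old` suffix matches the expectation in the upgrade-apply test earlier in this diff, but the actual call site is not part of this excerpt:

```go
package example

import (
	"fmt"

	"github.com/edgelesssys/constellation/v2/internal/constants"
	"github.com/edgelesssys/constellation/v2/internal/file"
)

// renameLegacyIDFile is illustrative only: it keeps the old id-file around
// under a ".old" suffix instead of deleting it outright.
func renameLegacyIDFile(fh file.Handler) error {
	if err := fh.RenameFile(constants.ClusterIDsFilename, constants.ClusterIDsFilename+".old"); err != nil {
		return fmt.Errorf("renaming %s: %w", constants.ClusterIDsFilename, err)
	}
	return nil
}
```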

View File

@ -540,3 +540,58 @@ func TestCopyDir(t *testing.T) {
})
}
}
func TestRename(t *testing.T) {
setupHandler := func(existingFiles ...string) Handler {
fs := afero.NewMemMapFs()
handler := NewHandler(fs)
for _, file := range existingFiles {
err := handler.Write(file, []byte("some content"), OptMkdirAll)
require.NoError(t, err)
}
return handler
}
testCases := map[string]struct {
handler Handler
renames map[string]string
checkFiles []string
wantErr bool
}{
"successful rename": {
handler: setupHandler("someFile"),
renames: map[string]string{"someFile": "someOtherFile"},
checkFiles: []string{"someOtherFile"},
},
"rename to existing file, overwrite": {
handler: setupHandler("someFile", "someOtherFile"),
renames: map[string]string{"someFile": "someOtherFile"},
checkFiles: []string{"someOtherFile"},
},
"file does not exist": {
handler: setupHandler(),
renames: map[string]string{"someFile": "someOtherFile"},
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
require := require.New(t)
for old, new := range tc.renames {
err := tc.handler.RenameFile(old, new)
if tc.wantErr {
require.Error(err)
} else {
require.NoError(err)
}
}
for _, file := range tc.checkFiles {
_, err := tc.handler.fs.Stat(file)
require.NoError(err)
}
})
}
}

View File

@ -91,6 +91,7 @@ clusterValues:
clusterID: "00112233445566778899AABBCCDDEEFF" # cluster ID uniquely identifies this Constellation cluster.
ownerID: "00112233445566778899AABBCCDDEEFF" # owner ID identifies this cluster as belonging to owner.
measurementSalt: "c2VjcmV0Cg==" # measurement salt is used by nodes to derive their cluster ID.
name: "constell-001122" # name of the cluster, as used in e.g. cluster resource naming.
```
## Updates to the state file