Mirror of https://github.com/edgelesssys/constellation.git, synced 2024-10-01 01:36:09 -04:00

constellation-lib: move kubecmd package usage (#2673)

* Reduce external dependencies of kubecmd package
* Add kubecmd wrapper to constellation-lib
* Update CLI code to use constellation-lib
* Move kubecmd package to subpackage of constellation-lib
* Initialise helm and kubecmd clients when kubeConfig is set

Signed-off-by: Daniel Weiße <dw@edgeless.systems>
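In practice, the CLI-side change boils down to one new step in `applyCmd.apply`: read the admin kubeconfig once, hand it to the constellation-lib applier via `SetKubeConfig` (which initialises both the kubecmd and the Helm client), and run all Kubernetes-side phases through that applier instead of a separately constructed `kubernetesUpgrader`. The following is a hedged, condensed sketch of that flow, written as a hypothetical helper inside `cli/internal/cmd`; identifiers follow the hunks below, while skip-phase checks and error handling are trimmed, and `imageVersion`/`imageReference` are assumed to be resolved as in `runNodeImageUpgrade`:

```go
// Hypothetical helper (not part of the commit) condensing the new flow from the
// apply.go hunks below. It assumes the surrounding package cmd context.
func (a *applyCmd) kubernetesPhasesSketch(
	cmd *cobra.Command, conf *config.Config, stateFile *state.State,
	imageVersion semver.Semver, imageReference string,
) error {
	kubeConfig, err := a.fileHandler.Read(constants.AdminConfFilename)
	if err != nil {
		return fmt.Errorf("reading kubeconfig: %w", err)
	}
	// One call initialises both the kubecmd and the Helm client inside the applier.
	if err := a.applier.SetKubeConfig(kubeConfig); err != nil {
		return err
	}
	// Kubernetes-side phases now all run against the same applier instead of a
	// separately constructed kubernetesUpgrader.
	if err := a.applier.ApplyJoinConfig(cmd.Context(), conf.GetAttestationConfig(), stateFile.ClusterValues.MeasurementSalt); err != nil {
		return fmt.Errorf("applying attestation config: %w", err)
	}
	if err := a.applier.UpgradeNodeImage(cmd.Context(), imageVersion, imageReference, a.flags.force); err != nil {
		return fmt.Errorf("upgrading node image: %w", err)
	}
	return a.applier.UpgradeKubernetesVersion(cmd.Context(), conf.KubernetesVersion, a.flags.force)
}
```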
This commit is contained in:
parent c07c333d3d
commit 3691defce7

Changed files: CODEOWNERS (21 lines changed)
@@ -1,19 +1,14 @@
.golangci.yml @katexochen
/3rdparty/gcp-guest-agent @malt3
/bazel @malt3
/bazel/ci @katexochen
/bazel/container @katexochen
/bazel @malt3
/bazel/sh @katexochen
/bootstrapper @3u13r
/internal/cloudcmd @daniel-weisse
/internal/cmd/upgrade* @derpsteb
/internal/featureset @malt3
/internal/helm @derpsteb
/internal/kubecmd @daniel-weisse
/internal/libvirt @daniel-weisse
/internal/terraform @elchead
/internal/upgrade @elchead
/internal/state @elchead
/cli/internal/cloudcmd @daniel-weisse
/cli/internal/cmd/upgrade* @derpsteb
/cli/internal/libvirt @daniel-weisse
/cli/internal/terraform @elchead
/csi @daniel-weisse
/debugd @malt3
/disk-mapper @daniel-weisse
@@ -21,8 +16,8 @@
/e2e @3u13r
/hack/azure-snp-report-verify @derpsteb
/hack/bazel-deps-mirror @malt3
/hack/cli-k8s-compatibility @derpsteb
/hack/clidocgen @thomasten
/hack/cli-k8s-compatibility @derpsteb
/hack/fetch-broken-e2e @katexochen
/hack/gocoverage @katexochen
/hack/oci-pin @malt3
@@ -38,11 +33,14 @@
/internal/cloud @3u13r
/internal/compatibility @derpsteb
/internal/config @derpsteb
/internal/constellation/kubecmd @daniel-weisse
/internal/containerimage @malt3
/internal/crypto @thomasten
/internal/cryptsetup @daniel-weisse
/internal/featureset @malt3
/internal/file @daniel-weisse
/internal/grpc @thomasten
/internal/helm @derpsteb
/internal/imagefetcher @malt3
/internal/installer @3u13r
/internal/kms @daniel-weisse
@@ -54,6 +52,7 @@
/internal/retry @katexochen
/internal/semver @derpsteb
/internal/sigstore @elchead
/internal/state @elchead
/internal/staticupload @malt3
/internal/versions @3u13r
/joinservice @daniel-weisse
@@ -73,14 +73,15 @@ go_library(
"//internal/config/migration",
"//internal/constants",
"//internal/constellation",
"//internal/constellation/kubecmd",
"//internal/crypto",
"//internal/featureset",
"//internal/file",
"//internal/grpc/dialer",
"//internal/grpc/retry",
"//internal/helm",
"//internal/imagefetcher",
"//internal/kms/uri",
"//internal/kubecmd",
# keep
"//internal/license",
"//internal/logger",
@@ -164,6 +165,7 @@ go_test(
"//internal/config",
"//internal/constants",
"//internal/constellation",
"//internal/constellation/kubecmd",
"//internal/crypto",
"//internal/crypto/testvector",
"//internal/file",
@@ -172,7 +174,6 @@ go_test(
"//internal/grpc/testdialer",
"//internal/helm",
"//internal/kms/uri",
"//internal/kubecmd",
"//internal/logger",
"//internal/semver",
"//internal/state",
@@ -22,6 +22,7 @@ import (
"github.com/edgelesssys/constellation/v2/bootstrapper/initproto"
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
"github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
"github.com/edgelesssys/constellation/v2/internal/atls"
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
@@ -29,16 +30,19 @@ import (
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/constellation"
"github.com/edgelesssys/constellation/v2/internal/constellation/kubecmd"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
"github.com/edgelesssys/constellation/v2/internal/helm"
"github.com/edgelesssys/constellation/v2/internal/imagefetcher"
"github.com/edgelesssys/constellation/v2/internal/kms/uri"
"github.com/edgelesssys/constellation/v2/internal/kubecmd"
"github.com/edgelesssys/constellation/v2/internal/semver"
"github.com/edgelesssys/constellation/v2/internal/state"
"github.com/edgelesssys/constellation/v2/internal/versions"
"github.com/spf13/afero"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
)
@@ -225,13 +229,7 @@ func runApply(cmd *cobra.Command, _ []string) error {
newDialer := func(validator atls.Validator) *dialer.Dialer {
return dialer.New(nil, validator, &net.Dialer{})
}
newKubeUpgrader := func(w io.Writer, kubeConfigPath string, log debugLog) (kubernetesUpgrader, error) {
kubeConfig, err := fileHandler.Read(kubeConfigPath)
if err != nil {
return nil, fmt.Errorf("reading kubeconfig: %w", err)
}
return kubecmd.New(w, kubeConfig, fileHandler, log)
}
newHelmClient := func(kubeConfigPath string, log debugLog) (helmApplier, error) {
kubeConfig, err := fileHandler.Read(kubeConfigPath)
if err != nil {
@@ -264,9 +262,8 @@ func runApply(cmd *cobra.Command, _ []string) error {
spinner: spinner,
merger: &kubeconfigMerger{log: log},
newHelmClient: newHelmClient,
newDialer: newDialer,
newKubeUpgrader: newKubeUpgrader,
newInfraApplier: newInfraApplier,
imageFetcher: imagefetcher.New(),
applier: applier,
}
@@ -287,15 +284,15 @@ type applyCmd struct {
merger configMerger
applier applier
imageFetcher imageFetcher
applier applier
newHelmClient func(kubeConfigPath string, log debugLog) (helmApplier, error)
newDialer func(validator atls.Validator) *dialer.Dialer
newKubeUpgrader func(out io.Writer, kubeConfigPath string, log debugLog) (kubernetesUpgrader, error)
newInfraApplier func(context.Context) (cloudApplier, func(), error)
}
type applier interface {
SetKubeConfig(kubeConfig []byte) error
CheckLicense(ctx context.Context, csp cloudprovider.Provider, licenseID string) (int, error)
GenerateMasterSecret() (uri.MasterSecret, error)
GenerateMeasurementSalt() ([]byte, error)
@@ -309,6 +306,13 @@ type applier interface {
*initproto.InitSuccessResponse,
error,
)
ExtendClusterConfigCertSANs(ctx context.Context, clusterEndpoint, customEndpoint string, additionalAPIServerCertSANs []string) error
GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error)
ApplyJoinConfig(ctx context.Context, newAttestConfig config.AttestationCfg, measurementSalt []byte) error
UpgradeNodeImage(ctx context.Context, imageVersion semver.Semver, imageReference string, force bool) error
UpgradeKubernetesVersion(ctx context.Context, kubernetesVersion versions.ValidK8sVersion, force bool) error
BackupCRDs(ctx context.Context, fileHandler file.Handler, upgradeDir string) ([]apiextensionsv1.CustomResourceDefinition, error)
BackupCRs(ctx context.Context, fileHandler file.Handler, crds []apiextensionsv1.CustomResourceDefinition, upgradeDir string) error
}
type warnLog interface {
@@ -415,42 +419,57 @@ func (a *applyCmd) apply(
}
}
if a.flags.skipPhases.contains(skipAttestationConfigPhase, skipCertSANsPhase, skipHelmPhase, skipK8sPhase, skipImagePhase) {
cmd.Print(bufferedOutput.String())
return nil
}
// From now on we can assume a valid Kubernetes admin config file exists
var kubeUpgrader kubernetesUpgrader
if !a.flags.skipPhases.contains(skipAttestationConfigPhase, skipCertSANsPhase, skipHelmPhase, skipK8sPhase, skipImagePhase) {
a.log.Debugf("Creating Kubernetes client using %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))
kubeUpgrader, err = a.newKubeUpgrader(cmd.OutOrStdout(), constants.AdminConfFilename, a.log)
if err != nil {
return err
}
kubeConfig, err := a.fileHandler.Read(constants.AdminConfFilename)
if err != nil {
return fmt.Errorf("reading kubeconfig: %w", err)
}
if err := a.applier.SetKubeConfig(kubeConfig); err != nil {
return err
}
// Apply Attestation Config
if !a.flags.skipPhases.contains(skipAttestationConfigPhase) {
a.log.Debugf("Applying new attestation config to cluster")
if err := a.applyJoinConfig(cmd, kubeUpgrader, conf.GetAttestationConfig(), stateFile.ClusterValues.MeasurementSalt); err != nil {
if err := a.applyJoinConfig(cmd, conf.GetAttestationConfig(), stateFile.ClusterValues.MeasurementSalt); err != nil {
return fmt.Errorf("applying attestation config: %w", err)
}
}
// Extend API Server Cert SANs
if !a.flags.skipPhases.contains(skipCertSANsPhase) {
sans := append([]string{stateFile.Infrastructure.ClusterEndpoint, conf.CustomEndpoint}, stateFile.Infrastructure.APIServerCertSANs...)
if err := kubeUpgrader.ExtendClusterConfigCertSANs(cmd.Context(), sans); err != nil {
if err := a.applier.ExtendClusterConfigCertSANs(
cmd.Context(),
stateFile.Infrastructure.ClusterEndpoint,
conf.CustomEndpoint,
stateFile.Infrastructure.APIServerCertSANs,
); err != nil {
return fmt.Errorf("extending cert SANs: %w", err)
}
}
// Apply Helm Charts
if !a.flags.skipPhases.contains(skipHelmPhase) {
if err := a.runHelmApply(cmd, conf, stateFile, kubeUpgrader, upgradeDir); err != nil {
if err := a.runHelmApply(cmd, conf, stateFile, upgradeDir); err != nil {
return err
}
}
// Upgrade NodeVersion object
if !(a.flags.skipPhases.contains(skipK8sPhase, skipImagePhase)) {
if err := a.runK8sUpgrade(cmd, conf, kubeUpgrader); err != nil {
// Upgrade node image
if !a.flags.skipPhases.contains(skipImagePhase) {
if err := a.runNodeImageUpgrade(cmd, conf); err != nil {
return err
}
}
// Upgrade Kubernetes version
if !a.flags.skipPhases.contains(skipK8sPhase) {
if err := a.runK8sVersionUpgrade(cmd, conf); err != nil {
return err
}
}
@@ -598,15 +617,14 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
// applyJoinConfig creates or updates the cluster's join config.
// If the config already exists, and is different from the new config, the user is asked to confirm the upgrade.
func (a *applyCmd) applyJoinConfig(
cmd *cobra.Command, kubeUpgrader kubernetesUpgrader, newConfig config.AttestationCfg, measurementSalt []byte,
func (a *applyCmd) applyJoinConfig(cmd *cobra.Command, newConfig config.AttestationCfg, measurementSalt []byte,
) error {
clusterAttestationConfig, err := kubeUpgrader.GetClusterAttestationConfig(cmd.Context(), newConfig.GetVariant())
clusterAttestationConfig, err := a.applier.GetClusterAttestationConfig(cmd.Context(), newConfig.GetVariant())
if err != nil {
a.log.Debugf("Getting cluster attestation config failed: %s", err)
if k8serrors.IsNotFound(err) {
a.log.Debugf("Creating new join config")
return kubeUpgrader.ApplyJoinConfig(cmd.Context(), newConfig, measurementSalt)
return a.applier.ApplyJoinConfig(cmd.Context(), newConfig, measurementSalt)
}
return fmt.Errorf("getting cluster attestation config: %w", err)
}
@@ -638,7 +656,7 @@ func (a *applyCmd) applyJoinConfig(
}
}
if err := kubeUpgrader.ApplyJoinConfig(cmd.Context(), newConfig, measurementSalt); err != nil {
if err := a.applier.ApplyJoinConfig(cmd.Context(), newConfig, measurementSalt); err != nil {
return fmt.Errorf("updating attestation config: %w", err)
}
cmd.Println("Successfully updated the cluster's attestation config")
@@ -646,19 +664,29 @@ func (a *applyCmd) applyJoinConfig(
return nil
}
// runK8sUpgrade upgrades image and Kubernetes version of the Constellation cluster.
func (a *applyCmd) runK8sUpgrade(cmd *cobra.Command, conf *config.Config, kubeUpgrader kubernetesUpgrader,
) error {
err := kubeUpgrader.UpgradeNodeVersion(
cmd.Context(), conf, a.flags.force,
a.flags.skipPhases.contains(skipImagePhase),
a.flags.skipPhases.contains(skipK8sPhase),
)
func (a *applyCmd) runNodeImageUpgrade(cmd *cobra.Command, conf *config.Config) error {
provider := conf.GetProvider()
attestationVariant := conf.GetAttestationConfig().GetVariant()
region := conf.GetRegion()
imageReference, err := a.imageFetcher.FetchReference(cmd.Context(), provider, attestationVariant, conf.Image, region)
if err != nil {
return fmt.Errorf("fetching image reference: %w", err)
}
imageVersionInfo, err := versionsapi.NewVersionFromShortPath(conf.Image, versionsapi.VersionKindImage)
if err != nil {
return fmt.Errorf("parsing version from image short path: %w", err)
}
imageVersion, err := semver.New(imageVersionInfo.Version())
if err != nil {
return fmt.Errorf("parsing image version: %w", err)
}
err = a.applier.UpgradeNodeImage(cmd.Context(), imageVersion, imageReference, a.flags.force)
var upgradeErr *compatibility.InvalidUpgradeError
switch {
case errors.Is(err, kubecmd.ErrInProgress):
cmd.PrintErrln("Skipping image and Kubernetes upgrades. Another upgrade is in progress.")
cmd.PrintErrln("Skipping image upgrade: Another upgrade is already in progress.")
case errors.As(err, &upgradeErr):
cmd.PrintErrln(err)
case err != nil:
@@ -668,6 +696,19 @@ func (a *applyCmd) runK8sUpgrade(cmd *cobra.Command, conf *config.Config, kubeUp
return nil
}
func (a *applyCmd) runK8sVersionUpgrade(cmd *cobra.Command, conf *config.Config) error {
err := a.applier.UpgradeKubernetesVersion(cmd.Context(), conf.KubernetesVersion, a.flags.force)
var upgradeErr *compatibility.InvalidUpgradeError
switch {
case errors.As(err, &upgradeErr):
cmd.PrintErrln(err)
case err != nil:
return fmt.Errorf("upgrading Kubernetes version: %w", err)
}
return nil
}
// checkCreateFilesClean ensures that the workspace is clean before creating a new cluster.
func (a *applyCmd) checkCreateFilesClean() error {
if err := a.checkInitFilesClean(); err != nil {
@@ -803,3 +844,11 @@ func (wl warnLogger) Infof(fmtStr string, args ...any) {
func (wl warnLogger) Warnf(fmtStr string, args ...any) {
wl.cmd.PrintErrf("Warning: %s\n", fmt.Sprintf(fmtStr, args...))
}
// imageFetcher gets an image reference from the versionsapi.
type imageFetcher interface {
FetchReference(ctx context.Context,
provider cloudprovider.Provider, attestationVariant variant.Variant,
image, region string,
) (string, error)
}
@@ -193,10 +193,11 @@ func TestBackupHelmCharts(t *testing.T) {
a := applyCmd{
fileHandler: file.NewHandler(afero.NewMemMapFs()),
applier: &stubConstellApplier{stubKubernetesUpgrader: tc.backupClient},
log: logger.NewTest(t),
}
err := a.backupHelmCharts(context.Background(), tc.backupClient, tc.helmApplier, tc.includesUpgrades, "")
err := a.backupHelmCharts(context.Background(), tc.helmApplier, tc.includesUpgrades, "")
if tc.wantErr {
assert.Error(err)
return
@@ -494,23 +495,29 @@ func newPhases(phases ...skipPhase) skipPhases {
type stubConstellApplier struct {
checkLicenseErr error
masterSecret uri.MasterSecret
measurementSalt []byte
generateMasterSecretErr error
generateMeasurementSaltErr error
initErr error
initResponse *initproto.InitSuccessResponse
*stubKubernetesUpgrader
}
func (s *stubConstellApplier) SetKubeConfig([]byte) error { return nil }
func (s *stubConstellApplier) CheckLicense(context.Context, cloudprovider.Provider, string) (int, error) {
return 0, s.checkLicenseErr
}
func (s *stubConstellApplier) GenerateMasterSecret() (uri.MasterSecret, error) {
return uri.MasterSecret{}, s.generateMasterSecretErr
return s.masterSecret, s.generateMasterSecretErr
}
func (s *stubConstellApplier) GenerateMeasurementSalt() ([]byte, error) {
return nil, s.generateMeasurementSaltErr
return s.measurementSalt, s.generateMeasurementSaltErr
}
func (s *stubConstellApplier) Init(context.Context, atls.Validator, *state.State, io.Writer, constellation.InitPayload) (*initproto.InitSuccessResponse, error) {
return nil, s.initErr
return s.initResponse, s.initErr
}
@@ -23,9 +23,7 @@ import (
)
// runHelmApply handles installing or upgrading helm charts for the cluster.
func (a *applyCmd) runHelmApply(
cmd *cobra.Command, conf *config.Config, stateFile *state.State,
kubeUpgrader kubernetesUpgrader, upgradeDir string,
func (a *applyCmd) runHelmApply(cmd *cobra.Command, conf *config.Config, stateFile *state.State, upgradeDir string,
) error {
a.log.Debugf("Installing or upgrading Helm charts")
var masterSecret uri.MasterSecret
@@ -80,7 +78,7 @@ func (a *applyCmd) runHelmApply(
}
a.log.Debugf("Backing up Helm charts")
if err := a.backupHelmCharts(cmd.Context(), kubeUpgrader, executor, includesUpgrades, upgradeDir); err != nil {
if err := a.backupHelmCharts(cmd.Context(), executor, includesUpgrades, upgradeDir); err != nil {
return err
}
@@ -105,7 +103,7 @@ func (a *applyCmd) runHelmApply(
// backupHelmCharts saves the Helm charts for the upgrade to disk and creates a backup of existing CRDs and CRs.
func (a *applyCmd) backupHelmCharts(
ctx context.Context, kubeUpgrader kubernetesUpgrader, executor helm.Applier, includesUpgrades bool, upgradeDir string,
ctx context.Context, executor helm.Applier, includesUpgrades bool, upgradeDir string,
) error {
// Save the Helm charts for the upgrade to disk
chartDir := filepath.Join(upgradeDir, "helm-charts")
@@ -116,11 +114,11 @@ func (a *applyCmd) backupHelmCharts(
if includesUpgrades {
a.log.Debugf("Creating backup of CRDs and CRs")
crds, err := kubeUpgrader.BackupCRDs(ctx, upgradeDir)
crds, err := a.applier.BackupCRDs(ctx, a.fileHandler, upgradeDir)
if err != nil {
return fmt.Errorf("creating CRD backup: %w", err)
}
if err := kubeUpgrader.BackupCRs(ctx, crds, upgradeDir); err != nil {
if err := a.applier.BackupCRs(ctx, a.fileHandler, crds, upgradeDir); err != nil {
return fmt.Errorf("creating CR backup: %w", err)
}
}
@@ -10,19 +10,13 @@ import (
"bytes"
"context"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"strconv"
"strings"
"testing"
"time"
"github.com/edgelesssys/constellation/v2/bootstrapper/initproto"
"github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
"github.com/edgelesssys/constellation/v2/internal/atls"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
@@ -31,9 +25,6 @@ import (
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/constellation"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/grpc/atlscredentials"
"github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
"github.com/edgelesssys/constellation/v2/internal/grpc/testdialer"
"github.com/edgelesssys/constellation/v2/internal/helm"
"github.com/edgelesssys/constellation/v2/internal/kms/uri"
"github.com/edgelesssys/constellation/v2/internal/logger"
@@ -43,7 +34,6 @@ import (
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/tools/clientcmd"
@@ -102,7 +92,8 @@ func TestInitialize(t *testing.T) {
stateFile *state.State
configMutator func(*config.Config)
serviceAccKey *gcpshared.ServiceAccountKey
initServerAPI *stubInitServer
initResponse *initproto.InitSuccessResponse
initErr error
retriable bool
masterSecretShouldExist bool
wantErr bool
@@ -112,47 +103,30 @@ func TestInitialize(t *testing.T) {
stateFile: preInitStateFile(cloudprovider.GCP),
configMutator: func(c *config.Config) { c.Provider.GCP.ServiceAccountKeyPath = serviceAccPath },
serviceAccKey: gcpServiceAccKey,
initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
initResponse: testInitResp,
},
"initialize some azure instances": {
provider: cloudprovider.Azure,
stateFile: preInitStateFile(cloudprovider.Azure),
initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
provider: cloudprovider.Azure,
stateFile: preInitStateFile(cloudprovider.Azure),
initResponse: testInitResp,
},
"initialize some qemu instances": {
provider: cloudprovider.QEMU,
stateFile: preInitStateFile(cloudprovider.QEMU),
initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
provider: cloudprovider.QEMU,
stateFile: preInitStateFile(cloudprovider.QEMU),
initResponse: testInitResp,
},
"non retriable error": {
provider: cloudprovider.QEMU,
stateFile: preInitStateFile(cloudprovider.QEMU),
initServerAPI: &stubInitServer{initErr: &constellation.NonRetriableInitError{Err: assert.AnError}},
initErr: &constellation.NonRetriableInitError{Err: assert.AnError},
retriable: false,
masterSecretShouldExist: true,
wantErr: true,
},
"non retriable error with failed log collection": {
provider: cloudprovider.QEMU,
stateFile: preInitStateFile(cloudprovider.QEMU),
initServerAPI: &stubInitServer{
res: []*initproto.InitResponse{
{
Kind: &initproto.InitResponse_InitFailure{
InitFailure: &initproto.InitFailureResponse{
Error: "error",
},
},
},
{
Kind: &initproto.InitResponse_InitFailure{
InitFailure: &initproto.InitFailureResponse{
Error: "error",
},
},
},
},
},
provider: cloudprovider.QEMU,
stateFile: preInitStateFile(cloudprovider.QEMU),
initErr: &constellation.NonRetriableInitError{Err: assert.AnError, LogCollectionErr: assert.AnError},
retriable: false,
masterSecretShouldExist: true,
wantErr: true,
@@ -162,7 +136,7 @@ func TestInitialize(t *testing.T) {
stateFile: &state.State{Version: "invalid"},
configMutator: func(c *config.Config) { c.Provider.GCP.ServiceAccountKeyPath = serviceAccPath },
serviceAccKey: gcpServiceAccKey,
initServerAPI: &stubInitServer{},
initResponse: testInitResp,
retriable: true,
wantErr: true,
},
@@ -171,7 +145,7 @@ func TestInitialize(t *testing.T) {
stateFile: &state.State{},
configMutator: func(c *config.Config) { c.Provider.GCP.ServiceAccountKeyPath = serviceAccPath },
serviceAccKey: gcpServiceAccKey,
initServerAPI: &stubInitServer{},
initResponse: testInitResp,
retriable: true,
wantErr: true,
},
@@ -179,6 +153,7 @@ func TestInitialize(t *testing.T) {
provider: cloudprovider.GCP,
configMutator: func(c *config.Config) { c.Provider.GCP.ServiceAccountKeyPath = serviceAccPath },
serviceAccKey: gcpServiceAccKey,
initResponse: testInitResp,
retriable: true,
wantErr: true,
},
@@ -187,15 +162,15 @@ func TestInitialize(t *testing.T) {
configMutator: func(c *config.Config) { c.Provider.GCP.ServiceAccountKeyPath = serviceAccPath },
stateFile: preInitStateFile(cloudprovider.GCP),
serviceAccKey: gcpServiceAccKey,
initServerAPI: &stubInitServer{initErr: assert.AnError},
initErr: &constellation.NonRetriableInitError{Err: assert.AnError},
retriable: false,
masterSecretShouldExist: true,
wantErr: true,
},
"k8s version without v works": {
provider: cloudprovider.Azure,
stateFile: preInitStateFile(cloudprovider.Azure),
initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
provider: cloudprovider.Azure,
stateFile: preInitStateFile(cloudprovider.Azure),
initResponse: testInitResp,
configMutator: func(c *config.Config) {
res, err := versions.NewValidK8sVersion(strings.TrimPrefix(string(versions.Default), "v"), true)
require.NoError(t, err)
@@ -203,9 +178,9 @@ func TestInitialize(t *testing.T) {
},
},
"outdated k8s patch version doesn't work": {
provider: cloudprovider.Azure,
stateFile: preInitStateFile(cloudprovider.Azure),
initServerAPI: &stubInitServer{res: []*initproto.InitResponse{{Kind: &initproto.InitResponse_InitSuccess{InitSuccess: testInitResp}}}},
provider: cloudprovider.Azure,
stateFile: preInitStateFile(cloudprovider.Azure),
initResponse: testInitResp,
configMutator: func(c *config.Config) {
v, err := semver.New(versions.SupportedK8sVersions()[0])
require.NoError(t, err)
@@ -221,18 +196,6 @@ func TestInitialize(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
// Networking
netDialer := testdialer.NewBufconnDialer()
newDialer := func(atls.Validator) *dialer.Dialer {
return dialer.New(nil, nil, netDialer)
}
serverCreds := atlscredentials.New(nil, nil)
initServer := grpc.NewServer(grpc.Creds(serverCreds))
initproto.RegisterAPIServer(initServer, tc.initServerAPI)
port := strconv.Itoa(constants.BootstrapperPort)
listener := netDialer.GetListener(net.JoinHostPort("192.0.2.1", port))
go initServer.Serve(listener)
defer initServer.GracefulStop()
// Command
cmd := NewInitCmd()
@@ -271,20 +234,21 @@ func TestInitialize(t *testing.T) {
spinner: &nopSpinner{},
merger: &stubMerger{},
newHelmClient: func(string, debugLog) (helmApplier, error) {
return &stubApplier{}, nil
return &stubHelmApplier{}, nil
},
newDialer: newDialer,
newKubeUpgrader: func(io.Writer, string, debugLog) (kubernetesUpgrader, error) {
return &stubKubernetesUpgrader{
applier: &stubConstellApplier{
masterSecret: uri.MasterSecret{
Key: bytes.Repeat([]byte{0x01}, 32),
Salt: bytes.Repeat([]byte{0x02}, 32),
},
measurementSalt: bytes.Repeat([]byte{0x03}, 32),
initErr: tc.initErr,
initResponse: tc.initResponse,
stubKubernetesUpgrader: &stubKubernetesUpgrader{
// On init, no attestation config exists yet
getClusterAttestationConfigErr: k8serrors.NewNotFound(schema.GroupResource{}, ""),
}, nil
},
},
applier: constellation.NewApplier(
logger.NewTest(t),
&nopSpinner{},
newDialer,
),
}
err := i.apply(cmd, stubAttestationFetcher{}, "test")
@@ -314,11 +278,14 @@ func TestInitialize(t *testing.T) {
}
}
type stubApplier struct {
type stubHelmApplier struct {
err error
}
func (s stubApplier) PrepareApply(_ cloudprovider.Provider, _ variant.Variant, _ versions.ValidK8sVersion, _ semver.Semver, _ *state.State, _ helm.Options, _ string, _ uri.MasterSecret, _ *config.OpenStackConfig) (helm.Applier, bool, error) {
func (s stubHelmApplier) PrepareApply(
_ cloudprovider.Provider, _ variant.Variant, _ versions.ValidK8sVersion, _ semver.Semver,
_ *state.State, _ helm.Options, _ string, _ uri.MasterSecret, _ *config.OpenStackConfig,
) (helm.Applier, bool, error) {
return stubRunner{}, false, s.err
}
@@ -513,153 +480,6 @@ func TestGenerateMasterSecret(t *testing.T) {
}
}
func TestAttestation(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
initServerAPI := &stubInitServer{res: []*initproto.InitResponse{
{
Kind: &initproto.InitResponse_InitSuccess{
InitSuccess: &initproto.InitSuccessResponse{
Kubeconfig: []byte("kubeconfig"),
OwnerId: []byte("ownerID"),
ClusterId: []byte("clusterID"),
},
},
},
}}
existingStateFile := &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.4"}}
netDialer := testdialer.NewBufconnDialer()
issuer := &testIssuer{
Getter: variant.QEMUVTPM{},
pcrs: map[uint32][]byte{
0: bytes.Repeat([]byte{0xFF}, 32),
1: bytes.Repeat([]byte{0xFF}, 32),
2: bytes.Repeat([]byte{0xFF}, 32),
3: bytes.Repeat([]byte{0xFF}, 32),
},
}
serverCreds := atlscredentials.New(issuer, nil)
initServer := grpc.NewServer(grpc.Creds(serverCreds))
initproto.RegisterAPIServer(initServer, initServerAPI)
port := strconv.Itoa(constants.BootstrapperPort)
listener := netDialer.GetListener(net.JoinHostPort("192.0.2.4", port))
go initServer.Serve(listener)
defer initServer.GracefulStop()
cmd := NewInitCmd()
cmd.Flags().String("workspace", "", "") // register persistent flag manually
cmd.Flags().Bool("force", true, "") // register persistent flag manually
var out bytes.Buffer
cmd.SetOut(&out)
var errOut bytes.Buffer
cmd.SetErr(&errOut)
fs := afero.NewMemMapFs()
fileHandler := file.NewHandler(fs)
require.NoError(existingStateFile.WriteToFile(fileHandler, constants.StateFilename))
cfg := config.Default()
cfg.Image = constants.BinaryVersion().String()
cfg.RemoveProviderAndAttestationExcept(cloudprovider.QEMU)
cfg.Attestation.QEMUVTPM.Measurements[0] = measurements.WithAllBytes(0x00, measurements.Enforce, measurements.PCRMeasurementLength)
cfg.Attestation.QEMUVTPM.Measurements[1] = measurements.WithAllBytes(0x11, measurements.Enforce, measurements.PCRMeasurementLength)
cfg.Attestation.QEMUVTPM.Measurements[2] = measurements.WithAllBytes(0x22, measurements.Enforce, measurements.PCRMeasurementLength)
cfg.Attestation.QEMUVTPM.Measurements[3] = measurements.WithAllBytes(0x33, measurements.Enforce, measurements.PCRMeasurementLength)
cfg.Attestation.QEMUVTPM.Measurements[4] = measurements.WithAllBytes(0x44, measurements.Enforce, measurements.PCRMeasurementLength)
cfg.Attestation.QEMUVTPM.Measurements[9] = measurements.WithAllBytes(0x99, measurements.Enforce, measurements.PCRMeasurementLength)
cfg.Attestation.QEMUVTPM.Measurements[12] = measurements.WithAllBytes(0xcc, measurements.Enforce, measurements.PCRMeasurementLength)
require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, cfg, file.OptNone))
newDialer := func(v atls.Validator) *dialer.Dialer {
validator := &testValidator{
Getter: variant.QEMUVTPM{},
pcrs: cfg.GetAttestationConfig().GetMeasurements(),
}
return dialer.New(nil, validator, netDialer)
}
ctx := context.Background()
ctx, cancel := context.WithTimeout(ctx, 4*time.Second)
defer cancel()
cmd.SetContext(ctx)
i := &applyCmd{
fileHandler: fileHandler,
spinner: &nopSpinner{},
merger: &stubMerger{},
log: logger.NewTest(t),
newKubeUpgrader: func(io.Writer, string, debugLog) (kubernetesUpgrader, error) {
return &stubKubernetesUpgrader{}, nil
},
newDialer: newDialer,
applier: constellation.NewApplier(logger.NewTest(t), &nopSpinner{}, newDialer),
}
_, err := i.runInit(cmd, cfg, existingStateFile)
assert.Error(err)
// make sure the error is actually a TLS handshake error
assert.Contains(err.Error(), "transport: authentication handshake failed")
if validationErr, ok := err.(*config.ValidationError); ok {
t.Log(validationErr.LongMessage())
}
}
type testValidator struct {
variant.Getter
pcrs measurements.M
}
func (v *testValidator) Validate(_ context.Context, attDoc []byte, _ []byte) ([]byte, error) {
var attestation struct {
UserData []byte
PCRs map[uint32][]byte
}
if err := json.Unmarshal(attDoc, &attestation); err != nil {
return nil, err
}
for k, pcr := range v.pcrs {
if !bytes.Equal(attestation.PCRs[k], pcr.Expected[:]) {
return nil, errors.New("invalid PCR value")
}
}
return attestation.UserData, nil
}
type testIssuer struct {
variant.Getter
pcrs map[uint32][]byte
}
func (i *testIssuer) Issue(_ context.Context, userData []byte, _ []byte) ([]byte, error) {
return json.Marshal(
struct {
UserData []byte
PCRs map[uint32][]byte
}{
UserData: userData,
PCRs: i.pcrs,
},
)
}
type stubInitServer struct {
res []*initproto.InitResponse
initErr error
initproto.UnimplementedAPIServer
}
func (s *stubInitServer) Init(_ *initproto.InitRequest, stream initproto.API_InitServer) error {
for _, r := range s.res {
_ = stream.Send(r)
}
return s.initErr
}
type stubMerger struct {
envVar string
mergeErr error
@@ -16,9 +16,9 @@ import (
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/constellation/kubecmd"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/helm"
"github.com/edgelesssys/constellation/v2/internal/kubecmd"
"github.com/spf13/afero"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
@@ -61,7 +61,7 @@ func runStatus(cmd *cobra.Command, _ []string) error {
}
fetcher := attestationconfigapi.NewFetcher()
kubeClient, err := kubecmd.New(cmd.OutOrStdout(), kubeConfig, fileHandler, log)
kubeClient, err := kubecmd.New(kubeConfig, log)
if err != nil {
return fmt.Errorf("setting up kubernetes client: %w", err)
}
@@ -17,8 +17,8 @@ import (
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/constellation/kubecmd"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/kubecmd"
updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
@@ -7,16 +7,13 @@ SPDX-License-Identifier: AGPL-3.0-only
package cmd
import (
"context"
"fmt"
"time"
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/rogpeppe/go-internal/diff"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)
func newUpgradeApplyCmd() *cobra.Command {
@@ -60,12 +57,3 @@ func diffAttestationCfg(currentAttestationCfg config.AttestationCfg, newAttestat
diff := string(diff.Diff("current", currentYml, "new", newYml))
return diff, nil
}
type kubernetesUpgrader interface {
UpgradeNodeVersion(ctx context.Context, conf *config.Config, force, skipImage, skipK8s bool) error
ExtendClusterConfigCertSANs(ctx context.Context, alternativeNames []string) error
GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error)
ApplyJoinConfig(ctx context.Context, newAttestConfig config.AttestationCfg, measurementSalt []byte) error
BackupCRs(ctx context.Context, crds []apiextensionsv1.CustomResourceDefinition, upgradeDir string) error
BackupCRDs(ctx context.Context, upgradeDir string) ([]apiextensionsv1.CustomResourceDefinition, error)
}
@@ -9,7 +9,6 @@ package cmd
import (
"bytes"
"context"
"io"
"testing"
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
@@ -17,10 +16,10 @@ import (
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/constellation/kubecmd"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/helm"
"github.com/edgelesssys/constellation/v2/internal/kms/uri"
"github.com/edgelesssys/constellation/v2/internal/kubecmd"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/semver"
"github.com/edgelesssys/constellation/v2/internal/state"
@@ -46,6 +45,7 @@ func TestUpgradeApply(t *testing.T) {
fh func() file.Handler
fhAssertions func(require *require.Assertions, assert *assert.Assertions, fh file.Handler)
terraformUpgrader cloudApplier
fetchImageErr error
wantErr bool
customK8sVersion string
flags applyFlags
@@ -53,7 +53,7 @@ func TestUpgradeApply(t *testing.T) {
}{
"success": {
kubeUpgrader: &stubKubernetesUpgrader{currentConfig: config.DefaultForAzureSEVSNP()},
helmUpgrader: stubApplier{},
helmUpgrader: stubHelmApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
flags: applyFlags{yes: true, skipPhases: skipPhases{skipInitPhase: struct{}{}}},
fh: fsWithStateFileAndTfState,
@@ -66,7 +66,7 @@ func TestUpgradeApply(t *testing.T) {
},
"id file and state file do not exist": {
kubeUpgrader: &stubKubernetesUpgrader{currentConfig: config.DefaultForAzureSEVSNP()},
helmUpgrader: stubApplier{},
helmUpgrader: stubHelmApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
flags: applyFlags{yes: true, skipPhases: skipPhases{skipInitPhase: struct{}{}}},
fh: func() file.Handler {
@@ -79,7 +79,7 @@ func TestUpgradeApply(t *testing.T) {
currentConfig: config.DefaultForAzureSEVSNP(),
nodeVersionErr: assert.AnError,
},
helmUpgrader: stubApplier{},
helmUpgrader: stubHelmApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
wantErr: true,
flags: applyFlags{yes: true, skipPhases: skipPhases{skipInitPhase: struct{}{}}},
@@ -90,7 +90,7 @@ func TestUpgradeApply(t *testing.T) {
currentConfig: config.DefaultForAzureSEVSNP(),
nodeVersionErr: kubecmd.ErrInProgress,
},
helmUpgrader: stubApplier{},
helmUpgrader: stubHelmApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
flags: applyFlags{yes: true, skipPhases: skipPhases{skipInitPhase: struct{}{}}},
fh: fsWithStateFileAndTfState,
@@ -99,7 +99,7 @@ func TestUpgradeApply(t *testing.T) {
kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
},
helmUpgrader: stubApplier{err: assert.AnError},
helmUpgrader: stubHelmApplier{err: assert.AnError},
terraformUpgrader: &stubTerraformUpgrader{},
wantErr: true,
flags: applyFlags{yes: true, skipPhases: skipPhases{skipInitPhase: struct{}{}}},
@@ -109,7 +109,7 @@ func TestUpgradeApply(t *testing.T) {
kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
},
helmUpgrader: stubApplier{},
helmUpgrader: stubHelmApplier{},
terraformUpgrader: &stubTerraformUpgrader{terraformDiff: true},
wantErr: true,
stdin: "no\n",
@@ -119,7 +119,7 @@ func TestUpgradeApply(t *testing.T) {
kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
},
helmUpgrader: stubApplier{},
helmUpgrader: stubHelmApplier{},
terraformUpgrader: &stubTerraformUpgrader{terraformDiff: true, rollbackWorkspaceErr: assert.AnError},
wantErr: true,
stdin: "no\n",
@@ -129,7 +129,7 @@ func TestUpgradeApply(t *testing.T) {
kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
},
helmUpgrader: stubApplier{},
helmUpgrader: stubHelmApplier{},
terraformUpgrader: &stubTerraformUpgrader{planTerraformErr: assert.AnError},
wantErr: true,
flags: applyFlags{yes: true, skipPhases: skipPhases{skipInitPhase: struct{}{}}},
@@ -139,7 +139,7 @@ func TestUpgradeApply(t *testing.T) {
kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
},
helmUpgrader: stubApplier{},
helmUpgrader: stubHelmApplier{},
terraformUpgrader: &stubTerraformUpgrader{
applyTerraformErr: assert.AnError,
terraformDiff: true,
@@ -152,7 +152,7 @@ func TestUpgradeApply(t *testing.T) {
kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
},
helmUpgrader: stubApplier{},
helmUpgrader: stubHelmApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
customK8sVersion: func() string {
v, err := semver.New(versions.SupportedK8sVersions()[0])
@@ -166,7 +166,7 @@ func TestUpgradeApply(t *testing.T) {
kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
},
helmUpgrader: stubApplier{},
helmUpgrader: stubHelmApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
customK8sVersion: "v1.20.0",
flags: applyFlags{yes: true, skipPhases: skipPhases{skipInitPhase: struct{}{}}},
@@ -201,7 +201,7 @@ func TestUpgradeApply(t *testing.T) {
kubeUpgrader: &stubKubernetesUpgrader{
currentConfig: config.DefaultForAzureSEVSNP(),
},
helmUpgrader: &stubApplier{},
helmUpgrader: &stubHelmApplier{},
terraformUpgrader: &mockTerraformUpgrader{},
flags: applyFlags{
yes: true,
@@ -215,12 +215,21 @@ func TestUpgradeApply(t *testing.T) {
},
"attempt to change attestation variant": {
kubeUpgrader: &stubKubernetesUpgrader{currentConfig: &config.AzureTrustedLaunch{}},
helmUpgrader: stubApplier{},
helmUpgrader: stubHelmApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
flags: applyFlags{yes: true, skipPhases: skipPhases{skipInitPhase: struct{}{}}},
fh: fsWithStateFileAndTfState,
wantErr: true,
},
"image fetching fails": {
kubeUpgrader: &stubKubernetesUpgrader{currentConfig: config.DefaultForAzureSEVSNP()},
helmUpgrader: stubHelmApplier{},
terraformUpgrader: &stubTerraformUpgrader{},
fetchImageErr: assert.AnError,
flags: applyFlags{yes: true, skipPhases: skipPhases{skipInitPhase: struct{}{}}},
fh: fsWithStateFileAndTfState,
wantErr: true,
},
}
for name, tc := range testCases {
@@ -248,13 +257,11 @@ func TestUpgradeApply(t *testing.T) {
newHelmClient: func(string, debugLog) (helmApplier, error) {
return tc.helmUpgrader, nil
},
newKubeUpgrader: func(_ io.Writer, _ string, _ debugLog) (kubernetesUpgrader, error) {
return tc.kubeUpgrader, nil
},
newInfraApplier: func(ctx context.Context) (cloudApplier, func(), error) {
return tc.terraformUpgrader, func() {}, nil
},
applier: &stubConstellApplier{},
applier: &stubConstellApplier{stubKubernetesUpgrader: tc.kubeUpgrader},
imageFetcher: &stubImageFetcher{fetchReferenceErr: tc.fetchImageErr},
}
err := upgrader.apply(cmd, stubAttestationFetcher{}, "test")
if tc.wantErr {
@@ -274,30 +281,37 @@ func TestUpgradeApply(t *testing.T) {
type stubKubernetesUpgrader struct {
nodeVersionErr error
kubernetesVersionErr error
currentConfig config.AttestationCfg
getClusterAttestationConfigErr error
calledNodeUpgrade bool
calledKubernetesUpgrade bool
backupCRDsErr error
backupCRDsCalled bool
backupCRsErr error
backupCRsCalled bool
}
func (u *stubKubernetesUpgrader) BackupCRDs(_ context.Context, _ string) ([]apiextensionsv1.CustomResourceDefinition, error) {
func (u *stubKubernetesUpgrader) BackupCRDs(_ context.Context, _ file.Handler, _ string) ([]apiextensionsv1.CustomResourceDefinition, error) {
u.backupCRDsCalled = true
return []apiextensionsv1.CustomResourceDefinition{}, u.backupCRDsErr
}
func (u *stubKubernetesUpgrader) BackupCRs(_ context.Context, _ []apiextensionsv1.CustomResourceDefinition, _ string) error {
func (u *stubKubernetesUpgrader) BackupCRs(_ context.Context, _ file.Handler, _ []apiextensionsv1.CustomResourceDefinition, _ string) error {
u.backupCRsCalled = true
return u.backupCRsErr
}
func (u *stubKubernetesUpgrader) UpgradeNodeVersion(_ context.Context, _ *config.Config, _, _, _ bool) error {
func (u *stubKubernetesUpgrader) UpgradeNodeImage(_ context.Context, _ semver.Semver, _ string, _ bool) error {
u.calledNodeUpgrade = true
return u.nodeVersionErr
}
func (u *stubKubernetesUpgrader) UpgradeKubernetesVersion(_ context.Context, _ versions.ValidK8sVersion, _ bool) error {
u.calledKubernetesUpgrade = true
return u.kubernetesVersionErr
}
func (u *stubKubernetesUpgrader) ApplyJoinConfig(_ context.Context, _ config.AttestationCfg, _ []byte) error {
return nil
}
@@ -306,7 +320,7 @@ func (u *stubKubernetesUpgrader) GetClusterAttestationConfig(_ context.Context,
return u.currentConfig, u.getClusterAttestationConfigErr
}
func (u *stubKubernetesUpgrader) ExtendClusterConfigCertSANs(_ context.Context, _ []string) error {
func (u *stubKubernetesUpgrader) ExtendClusterConfigCertSANs(_ context.Context, _, _ string, _ []string) error {
return nil
}
@@ -367,3 +381,15 @@ func (m *mockApplier) PrepareApply(csp cloudprovider.Provider, variant variant.V
args := m.Called(csp, variant, k8sVersion, microserviceVersion, stateFile, helmOpts, str, masterSecret, openStackCfg)
return args.Get(0).(helm.Applier), args.Bool(1), args.Error(2)
}
type stubImageFetcher struct {
reference string
fetchReferenceErr error
}
func (f *stubImageFetcher) FetchReference(_ context.Context,
_ cloudprovider.Provider, _ variant.Variant,
_, _ string,
) (string, error) {
return f.reference, f.fetchReferenceErr
}
@@ -26,10 +26,10 @@ import (
"github.com/edgelesssys/constellation/v2/internal/compatibility"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/constellation/kubecmd"
"github.com/edgelesssys/constellation/v2/internal/featureset"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/helm"
"github.com/edgelesssys/constellation/v2/internal/kubecmd"
consemver "github.com/edgelesssys/constellation/v2/internal/semver"
"github.com/edgelesssys/constellation/v2/internal/sigstore"
"github.com/edgelesssys/constellation/v2/internal/sigstore/keyselect"
@@ -120,7 +120,7 @@ func runUpgradeCheck(cmd *cobra.Command, _ []string) error {
if err != nil {
return fmt.Errorf("reading kubeconfig: %w", err)
}
kubeChecker, err := kubecmd.New(cmd.OutOrStdout(), kubeConfig, fileHandler, log)
kubeChecker, err := kubecmd.New(kubeConfig, log)
if err != nil {
return fmt.Errorf("setting up Kubernetes upgrader: %w", err)
}
@@ -226,7 +226,7 @@ ok github.com/edgelesssys/constellation/v2/operators/constellation-node-operat
`

const (
exampleReportCLI = `{"Metadate":{"Created":"2023-08-24T16:09:02Z"},"Coverage":{"github.com/edgelesssys/constellation/v2/cli":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/cmd":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd":{"Coverage":65.5,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/clusterid":{"Coverage":56.2,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/cmd":{"Coverage":53.5,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/internal/featureset":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/internal/helm":{"Coverage":47.7,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/internal/helm/imageversion":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/internal/kubecmd":{"Coverage":54.1,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/libvirt":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/terraform":{"Coverage":71.3,"Notest":false,"Nostmt":false}}}`
exampleReportCLI = `{"Metadate":{"Created":"2023-08-24T16:09:02Z"},"Coverage":{"github.com/edgelesssys/constellation/v2/cli":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/cmd":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd":{"Coverage":65.5,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/clusterid":{"Coverage":56.2,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/cmd":{"Coverage":53.5,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/internal/featureset":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/internal/helm":{"Coverage":47.7,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/internal/helm/imageversion":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/internal/constellation/kubecmd":{"Coverage":54.1,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/libvirt":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/terraform":{"Coverage":71.3,"Notest":false,"Nostmt":false}}}`
exampleReportCLIOld = `{"Metadate":{"Created":"2023-08-24T16:48:39Z"},"Coverage":{"github.com/edgelesssys/constellation/v2/cli":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/cmd":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd":{"Coverage":73.1,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/clusterid":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/cmd":{"Coverage":61.6,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/internal/featureset":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/internal/helm":{"Coverage":51.7,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/internal/helm/imageversion":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/iamid":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/kubernetes":{"Coverage":49.8,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/libvirt":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/terraform":{"Coverage":66.7,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/cli/internal/upgrade":{"Coverage":83,"Notest":false,"Nostmt":false}}}`
exampleReportDisk = `{"Metadate":{"Created":"2023-08-24T16:40:25Z"},"Coverage":{"github.com/edgelesssys/constellation/v2/disk-mapper/cmd":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/disk-mapper/internal/diskencryption":{"Coverage":0,"Notest":true,"Nostmt":false},"github.com/edgelesssys/constellation/v2/disk-mapper/internal/recoveryserver":{"Coverage":89.1,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/disk-mapper/internal/rejoinclient":{"Coverage":91.8,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/disk-mapper/internal/setup":{"Coverage":68.9,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/disk-mapper/internal/systemd":{"Coverage":25.8,"Notest":false,"Nostmt":false},"github.com/edgelesssys/constellation/v2/disk-mapper/recoverproto":{"Coverage":0,"Notest":true,"Nostmt":false}}}`
)
@ -7,23 +7,31 @@ go_library(
        "apply.go",
        "applyinit.go",
        "constellation.go",
        "kubernetes.go",
    ],
    importpath = "github.com/edgelesssys/constellation/v2/internal/constellation",
    visibility = ["//:__subpackages__"],
    deps = [
        "//bootstrapper/initproto",
        "//internal/atls",
        "//internal/attestation/variant",
        "//internal/cloud/cloudprovider",
        "//internal/config",
        "//internal/constants",
        "//internal/constellation/kubecmd",
        "//internal/crypto",
        "//internal/file",
        "//internal/grpc/dialer",
        "//internal/grpc/grpclog",
        "//internal/grpc/retry",
        "//internal/helm",
        "//internal/kms/uri",
        "//internal/license",
        "//internal/retry",
        "//internal/semver",
        "//internal/state",
        "//internal/versions",
        "@io_k8s_apiextensions_apiserver//pkg/apis/apiextensions/v1:apiextensions",
        "@org_golang_google_grpc//:go_default_library",
    ],
)
@ -38,7 +46,10 @@ go_test(
    deps = [
        "//bootstrapper/initproto",
        "//internal/atls",
        "//internal/attestation/measurements",
        "//internal/attestation/variant",
        "//internal/cloud/cloudprovider",
        "//internal/config",
        "//internal/constants",
        "//internal/crypto",
        "//internal/grpc/atlscredentials",
@ -11,11 +11,20 @@ import (
    "fmt"

    "github.com/edgelesssys/constellation/v2/internal/atls"
    "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
    "github.com/edgelesssys/constellation/v2/internal/config"
    "github.com/edgelesssys/constellation/v2/internal/constellation/kubecmd"
    "github.com/edgelesssys/constellation/v2/internal/crypto"
    "github.com/edgelesssys/constellation/v2/internal/file"
    "github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
    "github.com/edgelesssys/constellation/v2/internal/helm"
    "github.com/edgelesssys/constellation/v2/internal/kms/uri"
    "github.com/edgelesssys/constellation/v2/internal/license"
    "github.com/edgelesssys/constellation/v2/internal/semver"
    "github.com/edgelesssys/constellation/v2/internal/state"
    "github.com/edgelesssys/constellation/v2/internal/versions"
    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

// An Applier handles applying a specific configuration to a Constellation cluster
@ -27,7 +36,9 @@ type Applier struct {
    spinner spinnerInterf

    // newDialer creates a new aTLS gRPC dialer.
    newDialer func(validator atls.Validator) *dialer.Dialer
    kubecmdClient kubecmdClient
    helmClient    helmApplier
}

type licenseChecker interface {
@ -52,6 +63,21 @@ func NewApplier(
    }
}

// SetKubeConfig sets the config file to use for creating Kubernetes clients.
func (a *Applier) SetKubeConfig(kubeConfig []byte) error {
    kubecmdClient, err := kubecmd.New(kubeConfig, a.log)
    if err != nil {
        return err
    }
    helmClient, err := helm.NewClient(kubeConfig, a.log)
    if err != nil {
        return err
    }
    a.kubecmdClient = kubecmdClient
    a.helmClient = helmClient
    return nil
}

// CheckLicense checks the given Constellation license with the license server
// and returns the allowed quota for the license.
func (a *Applier) CheckLicense(ctx context.Context, csp cloudprovider.Provider, licenseID string) (int, error) {
@ -94,3 +120,21 @@ func (a *Applier) GenerateMeasurementSalt() ([]byte, error) {
    a.log.Debugf("Generated measurement salt")
    return measurementSalt, nil
}

type helmApplier interface {
    PrepareApply(
        csp cloudprovider.Provider, attestationVariant variant.Variant, k8sVersion versions.ValidK8sVersion, microserviceVersion semver.Semver, stateFile *state.State,
        flags helm.Options, serviceAccURI string, masterSecret uri.MasterSecret, openStackCfg *config.OpenStackConfig,
    ) (
        helm.Applier, bool, error)
}

type kubecmdClient interface {
    UpgradeNodeImage(ctx context.Context, imageVersion semver.Semver, imageReference string, force bool) error
    UpgradeKubernetesVersion(ctx context.Context, kubernetesVersion versions.ValidK8sVersion, force bool) error
    ExtendClusterConfigCertSANs(ctx context.Context, alternativeNames []string) error
    GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error)
    ApplyJoinConfig(ctx context.Context, newAttestConfig config.AttestationCfg, measurementSalt []byte) error
    BackupCRs(ctx context.Context, fileHandler file.Handler, crds []apiextensionsv1.CustomResourceDefinition, upgradeDir string) error
    BackupCRDs(ctx context.Context, fileHandler file.Handler, upgradeDir string) ([]apiextensionsv1.CustomResourceDefinition, error)
}
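For context, a minimal caller-side sketch of the new SetKubeConfig flow. The helper name, the kubeconfig path argument, and the assumption that an *Applier already exists are illustrative only and not part of this commit; since these are internal packages, such a helper would live inside the constellation module itself.

package example

import (
	"fmt"
	"os"

	"github.com/edgelesssys/constellation/v2/internal/constellation"
)

// initKubernetesClients reads a kubeconfig from a caller-supplied path and hands
// it to the Applier, which then builds its kubecmd and Helm clients internally.
func initKubernetesClients(applier *constellation.Applier, kubeConfigPath string) error {
	kubeConfig, err := os.ReadFile(kubeConfigPath)
	if err != nil {
		return fmt.Errorf("reading kubeconfig: %w", err)
	}
	// Until SetKubeConfig succeeds, the Kubernetes-backed Applier methods
	// (UpgradeNodeImage, BackupCRDs, ...) return an error instead of panicking.
	if err := applier.SetKubeConfig(kubeConfig); err != nil {
		return fmt.Errorf("initialising Kubernetes clients: %w", err)
	}
	return nil
}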
@ -9,6 +9,8 @@ package constellation
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"strconv"
|
||||
@ -17,6 +19,9 @@ import (
|
||||
|
||||
"github.com/edgelesssys/constellation/v2/bootstrapper/initproto"
|
||||
"github.com/edgelesssys/constellation/v2/internal/atls"
|
||||
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
|
||||
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
|
||||
"github.com/edgelesssys/constellation/v2/internal/config"
|
||||
"github.com/edgelesssys/constellation/v2/internal/constants"
|
||||
"github.com/edgelesssys/constellation/v2/internal/grpc/atlscredentials"
|
||||
"github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
|
||||
@ -200,6 +205,119 @@ func TestInit(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttestation(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
initServerAPI := &stubInitServer{res: []*initproto.InitResponse{
|
||||
{
|
||||
Kind: &initproto.InitResponse_InitSuccess{
|
||||
InitSuccess: &initproto.InitSuccessResponse{
|
||||
Kubeconfig: []byte("kubeconfig"),
|
||||
OwnerId: []byte("ownerID"),
|
||||
ClusterId: []byte("clusterID"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}}
|
||||
|
||||
netDialer := testdialer.NewBufconnDialer()
|
||||
|
||||
issuer := &testIssuer{
|
||||
Getter: variant.QEMUVTPM{},
|
||||
pcrs: map[uint32][]byte{
|
||||
0: bytes.Repeat([]byte{0xFF}, 32),
|
||||
1: bytes.Repeat([]byte{0xFF}, 32),
|
||||
2: bytes.Repeat([]byte{0xFF}, 32),
|
||||
3: bytes.Repeat([]byte{0xFF}, 32),
|
||||
},
|
||||
}
|
||||
serverCreds := atlscredentials.New(issuer, nil)
|
||||
initServer := grpc.NewServer(grpc.Creds(serverCreds))
|
||||
initproto.RegisterAPIServer(initServer, initServerAPI)
|
||||
port := strconv.Itoa(constants.BootstrapperPort)
|
||||
listener := netDialer.GetListener(net.JoinHostPort("192.0.2.4", port))
|
||||
go initServer.Serve(listener)
|
||||
defer initServer.GracefulStop()
|
||||
|
||||
validator := &testValidator{
|
||||
Getter: variant.QEMUVTPM{},
|
||||
pcrs: measurements.M{
|
||||
0: measurements.WithAllBytes(0x00, measurements.Enforce, measurements.PCRMeasurementLength),
|
||||
1: measurements.WithAllBytes(0x11, measurements.Enforce, measurements.PCRMeasurementLength),
|
||||
2: measurements.WithAllBytes(0x22, measurements.Enforce, measurements.PCRMeasurementLength),
|
||||
3: measurements.WithAllBytes(0x33, measurements.Enforce, measurements.PCRMeasurementLength),
|
||||
4: measurements.WithAllBytes(0x44, measurements.Enforce, measurements.PCRMeasurementLength),
|
||||
9: measurements.WithAllBytes(0x99, measurements.Enforce, measurements.PCRMeasurementLength),
|
||||
12: measurements.WithAllBytes(0xcc, measurements.Enforce, measurements.PCRMeasurementLength),
|
||||
},
|
||||
}
|
||||
state := &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.4"}}
|
||||
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithTimeout(ctx, 4*time.Second)
|
||||
defer cancel()
|
||||
|
||||
initer := &Applier{
|
||||
log: logger.NewTest(t),
|
||||
newDialer: func(v atls.Validator) *dialer.Dialer {
|
||||
return dialer.New(nil, v, netDialer)
|
||||
},
|
||||
spinner: &nopSpinner{},
|
||||
}
|
||||
|
||||
_, err := initer.Init(ctx, validator, state, io.Discard, InitPayload{
|
||||
MasterSecret: uri.MasterSecret{},
|
||||
MeasurementSalt: []byte{},
|
||||
K8sVersion: "v1.26.5",
|
||||
ConformanceMode: false,
|
||||
})
|
||||
assert.Error(err)
|
||||
// make sure the error is actually a TLS handshake error
|
||||
assert.Contains(err.Error(), "transport: authentication handshake failed")
|
||||
if validationErr, ok := err.(*config.ValidationError); ok {
|
||||
t.Log(validationErr.LongMessage())
|
||||
}
|
||||
}
|
||||
|
||||
type testValidator struct {
|
||||
variant.Getter
|
||||
pcrs measurements.M
|
||||
}
|
||||
|
||||
func (v *testValidator) Validate(_ context.Context, attDoc []byte, _ []byte) ([]byte, error) {
|
||||
var attestation struct {
|
||||
UserData []byte
|
||||
PCRs map[uint32][]byte
|
||||
}
|
||||
if err := json.Unmarshal(attDoc, &attestation); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for k, pcr := range v.pcrs {
|
||||
if !bytes.Equal(attestation.PCRs[k], pcr.Expected[:]) {
|
||||
return nil, errors.New("invalid PCR value")
|
||||
}
|
||||
}
|
||||
return attestation.UserData, nil
|
||||
}
|
||||
|
||||
type testIssuer struct {
|
||||
variant.Getter
|
||||
pcrs map[uint32][]byte
|
||||
}
|
||||
|
||||
func (i *testIssuer) Issue(_ context.Context, userData []byte, _ []byte) ([]byte, error) {
|
||||
return json.Marshal(
|
||||
struct {
|
||||
UserData []byte
|
||||
PCRs map[uint32][]byte
|
||||
}{
|
||||
UserData: userData,
|
||||
PCRs: i.pcrs,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
type nopSpinner struct {
|
||||
io.Writer
|
||||
}
|
||||
|
@ -8,20 +8,18 @@ go_library(
        "kubecmd.go",
        "status.go",
    ],
    importpath = "github.com/edgelesssys/constellation/v2/internal/kubecmd",
    visibility = ["//cli:__subpackages__"],
    importpath = "github.com/edgelesssys/constellation/v2/internal/constellation/kubecmd",
    visibility = ["//:__subpackages__"],
    deps = [
        "//internal/api/versionsapi",
        "//internal/attestation/variant",
        "//internal/cloud/cloudprovider",
        "//internal/compatibility",
        "//internal/config",
        "//internal/constants",
        "//internal/file",
        "//internal/imagefetcher",
        "//internal/kubernetes",
        "//internal/kubernetes/kubectl",
        "//internal/retry",
        "//internal/semver",
        "//internal/versions",
        "//internal/versions/components",
        "//operators/constellation-node-operator/api/v1alpha1",
@ -47,13 +45,12 @@ go_test(
    embed = [":kubecmd"],
    deps = [
        "//internal/attestation/measurements",
        "//internal/attestation/variant",
        "//internal/cloud/cloudprovider",
        "//internal/compatibility",
        "//internal/config",
        "//internal/constants",
        "//internal/file",
        "//internal/logger",
        "//internal/semver",
        "//internal/versions",
        "//internal/versions/components",
        "//operators/constellation-node-operator/api/v1alpha1",
@ -11,6 +11,7 @@ import (
    "fmt"
    "path/filepath"

    "github.com/edgelesssys/constellation/v2/internal/file"
    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    k8serrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -24,7 +25,7 @@ type crdLister interface {
}

// BackupCRDs backs up all CRDs to the upgrade workspace.
func (k *KubeCmd) BackupCRDs(ctx context.Context, upgradeDir string) ([]apiextensionsv1.CustomResourceDefinition, error) {
func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgradeDir string) ([]apiextensionsv1.CustomResourceDefinition, error) {
    k.log.Debugf("Starting CRD backup")
    crds, err := k.kubectl.ListCRDs(ctx)
    if err != nil {
@ -32,7 +33,7 @@ func (k *KubeCmd) BackupCRDs(ctx context.Context, upgradeDir string) ([]apiexten
    }

    crdBackupFolder := k.crdBackupFolder(upgradeDir)
    if err := k.fileHandler.MkdirAll(crdBackupFolder); err != nil {
    if err := fileHandler.MkdirAll(crdBackupFolder); err != nil {
        return nil, fmt.Errorf("creating backup dir: %w", err)
    }
    for i := range crds {
@ -51,7 +52,7 @@ func (k *KubeCmd) BackupCRDs(ctx context.Context, upgradeDir string) ([]apiexten
        if err != nil {
            return nil, err
        }
        if err := k.fileHandler.Write(path, yamlBytes); err != nil {
        if err := fileHandler.Write(path, yamlBytes); err != nil {
            return nil, err
        }
    }
@ -60,7 +61,7 @@ func (k *KubeCmd) BackupCRDs(ctx context.Context, upgradeDir string) ([]apiexten
}

// BackupCRs backs up all CRs to the upgrade workspace.
func (k *KubeCmd) BackupCRs(ctx context.Context, crds []apiextensionsv1.CustomResourceDefinition, upgradeDir string) error {
func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds []apiextensionsv1.CustomResourceDefinition, upgradeDir string) error {
    k.log.Debugf("Starting CR backup")
    for _, crd := range crds {
        k.log.Debugf("Creating backup for resource type: %s", crd.Name)
@ -86,7 +87,7 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, crds []apiextensionsv1.CustomRe
        backupFolder := k.backupFolder(upgradeDir)
        for _, cr := range crs {
            targetFolder := filepath.Join(backupFolder, gvr.Group, gvr.Version, cr.GetNamespace(), cr.GetKind())
            if err := k.fileHandler.MkdirAll(targetFolder); err != nil {
            if err := fileHandler.MkdirAll(targetFolder); err != nil {
                return fmt.Errorf("creating resource dir: %w", err)
            }
            path := filepath.Join(targetFolder, cr.GetName()+".yaml")
@ -94,7 +95,7 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, crds []apiextensionsv1.CustomRe
            if err != nil {
                return err
            }
            if err := k.fileHandler.Write(path, yamlBytes); err != nil {
            if err := fileHandler.Write(path, yamlBytes); err != nil {
                return err
            }
        }
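Because BackupCRDs and BackupCRs now take the file.Handler as a parameter instead of reading it from the KubeCmd struct, callers supply it per invocation. A hedged sketch of the new call pattern follows; the helper name, the OS-backed filesystem, and the upgrade directory value are illustrative assumptions.

package example

import (
	"context"
	"log"

	"github.com/edgelesssys/constellation/v2/internal/constellation/kubecmd"
	"github.com/edgelesssys/constellation/v2/internal/file"
	"github.com/spf13/afero"
)

// backupClusterResources shows the new call pattern: the file.Handler is passed
// explicitly, so the same KubeCmd can write backups through different handlers.
func backupClusterResources(ctx context.Context, k *kubecmd.KubeCmd) error {
	fileHandler := file.NewHandler(afero.NewOsFs()) // illustrative choice of backing filesystem
	upgradeDir := "constellation-upgrade"           // placeholder workspace directory

	crds, err := k.BackupCRDs(ctx, fileHandler, upgradeDir)
	if err != nil {
		return err
	}
	if err := k.BackupCRs(ctx, fileHandler, crds, upgradeDir); err != nil {
		return err
	}
	log.Printf("backed up %d CRDs and their resources", len(crds))
	return nil
}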
@ -53,12 +53,11 @@ func TestBackupCRDs(t *testing.T) {
|
||||
err := yaml.Unmarshal([]byte(tc.crd), &crd)
|
||||
require.NoError(err)
|
||||
client := KubeCmd{
|
||||
kubectl: &stubKubectl{crds: []apiextensionsv1.CustomResourceDefinition{crd}, getCRDsError: tc.getCRDsError},
|
||||
fileHandler: file.NewHandler(memFs),
|
||||
log: stubLog{},
|
||||
kubectl: &stubKubectl{crds: []apiextensionsv1.CustomResourceDefinition{crd}, getCRDsError: tc.getCRDsError},
|
||||
log: stubLog{},
|
||||
}
|
||||
|
||||
_, err = client.BackupCRDs(context.Background(), tc.upgradeID)
|
||||
_, err = client.BackupCRDs(context.Background(), file.NewHandler(memFs), tc.upgradeID)
|
||||
if tc.wantError {
|
||||
assert.Error(err)
|
||||
return
|
||||
@ -143,12 +142,11 @@ func TestBackupCRs(t *testing.T) {
|
||||
memFs := afero.NewMemMapFs()
|
||||
|
||||
client := KubeCmd{
|
||||
kubectl: &stubKubectl{crs: []unstructured.Unstructured{tc.resource}, getCRsError: tc.getCRsError},
|
||||
fileHandler: file.NewHandler(memFs),
|
||||
log: stubLog{},
|
||||
kubectl: &stubKubectl{crs: []unstructured.Unstructured{tc.resource}, getCRsError: tc.getCRsError},
|
||||
log: stubLog{},
|
||||
}
|
||||
|
||||
err := client.BackupCRs(context.Background(), []apiextensionsv1.CustomResourceDefinition{tc.crd}, tc.upgradeID)
|
||||
err := client.BackupCRs(context.Background(), file.NewHandler(memFs), []apiextensionsv1.CustomResourceDefinition{tc.crd}, tc.upgradeID)
|
||||
if tc.wantError {
|
||||
assert.Error(err)
|
||||
return
|
@ -21,23 +21,19 @@ import (
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "slices"
    "sort"
    "strings"
    "time"

    "github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
    "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
    "github.com/edgelesssys/constellation/v2/internal/compatibility"
    "github.com/edgelesssys/constellation/v2/internal/config"
    "github.com/edgelesssys/constellation/v2/internal/constants"
    "github.com/edgelesssys/constellation/v2/internal/file"
    "github.com/edgelesssys/constellation/v2/internal/imagefetcher"
    internalk8s "github.com/edgelesssys/constellation/v2/internal/kubernetes"
    "github.com/edgelesssys/constellation/v2/internal/kubernetes/kubectl"
    conretry "github.com/edgelesssys/constellation/v2/internal/retry"
    "github.com/edgelesssys/constellation/v2/internal/semver"
    "github.com/edgelesssys/constellation/v2/internal/versions"
    "github.com/edgelesssys/constellation/v2/internal/versions/components"
    updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
@ -73,15 +69,12 @@ func (e *applyError) Error() string {
// KubeCmd handles interaction with the cluster's components using the CLI.
type KubeCmd struct {
    kubectl       kubectlInterface
    imageFetcher  imageFetcher
    outWriter     io.Writer
    fileHandler   file.Handler
    retryInterval time.Duration
    log           debugLog
}

// New returns a new KubeCmd.
func New(outWriter io.Writer, kubeConfig []byte, fileHandler file.Handler, log debugLog) (*KubeCmd, error) {
func New(kubeConfig []byte, log debugLog) (*KubeCmd, error) {
    client, err := kubectl.NewFromConfig(kubeConfig)
    if err != nil {
        return nil, fmt.Errorf("creating kubectl client: %w", err)
@ -89,103 +82,93 @@ func New(outWriter io.Writer, kubeConfig []byte, fileHandler file.Handler, log d

    return &KubeCmd{
        kubectl:       client,
        fileHandler:   fileHandler,
        imageFetcher:  imagefetcher.New(),
        outWriter:     outWriter,
        retryInterval: time.Second * 5,
        log:           log,
    }, nil
}
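With the slimmed-down constructor, building a KubeCmd only requires the raw kubeconfig and a debug logger. The following sketch shows the new signature in use; the stdDebugLog adapter and the kubeconfig path are assumptions for illustration (the real CLI passes its own logger).

package example

import (
	"log"
	"os"

	"github.com/edgelesssys/constellation/v2/internal/constellation/kubecmd"
)

// stdDebugLog is a minimal, hypothetical adapter satisfying kubecmd's debugLog
// interface (a single Debugf method).
type stdDebugLog struct{}

func (stdDebugLog) Debugf(format string, args ...any) { log.Printf(format, args...) }

// newKubeCmd builds a KubeCmd with the reduced constructor: outWriter and
// fileHandler were dropped from the signature in this commit.
func newKubeCmd(kubeConfigPath string) (*kubecmd.KubeCmd, error) {
	kubeConfig, err := os.ReadFile(kubeConfigPath)
	if err != nil {
		return nil, err
	}
	return kubecmd.New(kubeConfig, stdDebugLog{})
}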
// UpgradeNodeVersion upgrades the cluster's NodeVersion object and in turn triggers image & k8s version upgrades.
|
||||
// The versions set in the config are validated against the versions running in the cluster.
|
||||
// TODO(elchead): AB#3434 Split K8s and image upgrade of UpgradeNodeVersion.
|
||||
func (k *KubeCmd) UpgradeNodeVersion(ctx context.Context, conf *config.Config, force, skipImage, skipK8s bool) error {
|
||||
provider := conf.GetProvider()
|
||||
attestationVariant := conf.GetAttestationConfig().GetVariant()
|
||||
region := conf.GetRegion()
|
||||
imageReference, err := k.imageFetcher.FetchReference(ctx, provider, attestationVariant, conf.Image, region)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching image reference: %w", err)
|
||||
}
|
||||
|
||||
imageVersion, err := versionsapi.NewVersionFromShortPath(conf.Image, versionsapi.VersionKindImage)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing version from image short path: %w", err)
|
||||
}
|
||||
|
||||
// UpgradeNodeImage upgrades the image version of a Constellation cluster.
|
||||
func (k *KubeCmd) UpgradeNodeImage(ctx context.Context, imageVersion semver.Semver, imageReference string, force bool) error {
|
||||
nodeVersion, err := k.getConstellationVersion(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
upgradeErrs := []error{}
|
||||
k.log.Debugf("Checking if image upgrade is valid")
|
||||
var upgradeErr *compatibility.InvalidUpgradeError
|
||||
if !skipImage {
|
||||
err = k.isValidImageUpgrade(nodeVersion, imageVersion.Version(), force)
|
||||
switch {
|
||||
case errors.As(err, &upgradeErr):
|
||||
upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping image upgrades: %w", err))
|
||||
case err != nil:
|
||||
return fmt.Errorf("updating image version: %w", err)
|
||||
}
|
||||
|
||||
// TODO(3u13r): remove `reconcileKubeadmConfigMap` after v2.14.0 has been released.
|
||||
if err := k.reconcileKubeadmConfigMap(ctx); err != nil {
|
||||
return fmt.Errorf("reconciling kubeadm config: %w", err)
|
||||
}
|
||||
|
||||
k.log.Debugf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, imageVersion.Version())
|
||||
nodeVersion.Spec.ImageReference = imageReference
|
||||
nodeVersion.Spec.ImageVersion = imageVersion.Version()
|
||||
err = k.isValidImageUpgrade(nodeVersion, imageVersion.String(), force)
|
||||
switch {
|
||||
case errors.As(err, &upgradeErr):
|
||||
return fmt.Errorf("skipping image upgrade: %w", err)
|
||||
case err != nil:
|
||||
return fmt.Errorf("updating image version: %w", err)
|
||||
}
|
||||
|
||||
if !skipK8s {
|
||||
// We have to allow users to specify outdated k8s patch versions.
|
||||
// Therefore, this code has to skip k8s updates if a user configures an outdated (i.e. invalid) k8s version.
|
||||
var components *corev1.ConfigMap
|
||||
_, err = versions.NewValidK8sVersion(string(conf.KubernetesVersion), true)
|
||||
if err != nil {
|
||||
innerErr := fmt.Errorf("unsupported Kubernetes version, supported versions are %s",
|
||||
strings.Join(versions.SupportedK8sVersions(), ", "))
|
||||
err = compatibility.NewInvalidUpgradeError(nodeVersion.Spec.KubernetesClusterVersion,
|
||||
string(conf.KubernetesVersion), innerErr)
|
||||
// TODO(3u13r): remove `reconcileKubeadmConfigMap` after v2.14.0 has been released.
|
||||
if err := k.reconcileKubeadmConfigMap(ctx); err != nil {
|
||||
return fmt.Errorf("reconciling kubeadm config: %w", err)
|
||||
}
|
||||
|
||||
k.log.Debugf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, imageVersion.String())
|
||||
nodeVersion.Spec.ImageReference = imageReference
|
||||
nodeVersion.Spec.ImageVersion = imageVersion.String()
|
||||
|
||||
updatedNodeVersion, err := k.applyNodeVersion(ctx, nodeVersion)
|
||||
if err != nil {
|
||||
return fmt.Errorf("applying upgrade: %w", err)
|
||||
}
|
||||
return checkForApplyError(nodeVersion, updatedNodeVersion)
|
||||
}
|
||||
|
||||
// UpgradeKubernetesVersion upgrades the Kubernetes version of a Constellation cluster.
|
||||
func (k *KubeCmd) UpgradeKubernetesVersion(ctx context.Context, kubernetesVersion versions.ValidK8sVersion, force bool) error {
|
||||
nodeVersion, err := k.getConstellationVersion(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var upgradeErr *compatibility.InvalidUpgradeError
|
||||
// We have to allow users to specify outdated k8s patch versions.
|
||||
// Therefore, this code has to skip k8s updates if a user configures an outdated (i.e. invalid) k8s version.
|
||||
var components *corev1.ConfigMap
|
||||
_, err = versions.NewValidK8sVersion(string(kubernetesVersion), true)
|
||||
if err != nil {
|
||||
err = compatibility.NewInvalidUpgradeError(
|
||||
nodeVersion.Spec.KubernetesClusterVersion,
|
||||
string(kubernetesVersion),
|
||||
fmt.Errorf("unsupported Kubernetes version, supported versions are %s", strings.Join(versions.SupportedK8sVersions(), ", ")),
|
||||
)
|
||||
} else {
|
||||
versionConfig, ok := versions.VersionConfigs[kubernetesVersion]
|
||||
if !ok {
|
||||
err = compatibility.NewInvalidUpgradeError(
|
||||
nodeVersion.Spec.KubernetesClusterVersion,
|
||||
string(kubernetesVersion),
|
||||
fmt.Errorf("no version config matching K8s %s", kubernetesVersion),
|
||||
)
|
||||
} else {
|
||||
versionConfig, ok := versions.VersionConfigs[conf.KubernetesVersion]
|
||||
if !ok {
|
||||
err = compatibility.NewInvalidUpgradeError(nodeVersion.Spec.KubernetesClusterVersion,
|
||||
string(conf.KubernetesVersion), fmt.Errorf("no version config matching K8s %s", conf.KubernetesVersion))
|
||||
} else {
|
||||
components, err = k.prepareUpdateK8s(&nodeVersion, versionConfig.ClusterVersion,
|
||||
versionConfig.KubernetesComponents, force)
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
err := k.applyComponentsCM(ctx, components)
|
||||
if err != nil {
|
||||
return fmt.Errorf("applying k8s components ConfigMap: %w", err)
|
||||
}
|
||||
case errors.As(err, &upgradeErr):
|
||||
upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping Kubernetes upgrades: %w", err))
|
||||
default:
|
||||
return fmt.Errorf("updating Kubernetes version: %w", err)
|
||||
components, err = k.prepareUpdateK8s(&nodeVersion, versionConfig.ClusterVersion,
|
||||
versionConfig.KubernetesComponents, force)
|
||||
}
|
||||
}
|
||||
if len(upgradeErrs) == 2 {
|
||||
return errors.Join(upgradeErrs...)
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
err := k.applyComponentsCM(ctx, components)
|
||||
if err != nil {
|
||||
return fmt.Errorf("applying k8s components ConfigMap: %w", err)
|
||||
}
|
||||
case errors.As(err, &upgradeErr):
|
||||
return fmt.Errorf("skipping Kubernetes upgrade: %w", err)
|
||||
default:
|
||||
return fmt.Errorf("updating Kubernetes version: %w", err)
|
||||
}
|
||||
|
||||
updatedNodeVersion, err := k.applyNodeVersion(ctx, nodeVersion)
|
||||
if err != nil {
|
||||
return fmt.Errorf("applying upgrade: %w", err)
|
||||
}
|
||||
|
||||
if err := checkForApplyError(nodeVersion, updatedNodeVersion); err != nil {
|
||||
return err
|
||||
}
|
||||
return errors.Join(upgradeErrs...)
|
||||
return checkForApplyError(nodeVersion, updatedNodeVersion)
|
||||
}
|
||||
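The former UpgradeNodeVersion combined both upgrades; after this split, a caller drives image and Kubernetes upgrades independently and may treat an InvalidUpgradeError as a skip rather than a failure. A hedged caller-side sketch, with placeholder version values:

package example

import (
	"context"
	"errors"
	"log"

	"github.com/edgelesssys/constellation/v2/internal/compatibility"
	"github.com/edgelesssys/constellation/v2/internal/constellation/kubecmd"
	"github.com/edgelesssys/constellation/v2/internal/semver"
	"github.com/edgelesssys/constellation/v2/internal/versions"
)

// upgradeCluster is illustrative only: a skipped image upgrade no longer blocks
// the Kubernetes upgrade, and vice versa.
func upgradeCluster(ctx context.Context, k *kubecmd.KubeCmd, imageRef string) error {
	imageVersion := semver.NewFromInt(2, 14, 0, "")                // placeholder target image version
	kubernetesVersion := versions.ValidK8sVersion("v1.28.4")       // placeholder target K8s version

	if err := k.UpgradeNodeImage(ctx, imageVersion, imageRef, false); err != nil {
		var invalidUpgrade *compatibility.InvalidUpgradeError
		if !errors.As(err, &invalidUpgrade) {
			return err
		}
		log.Printf("skipping image upgrade: %v", err) // e.g. not an upgrade
	}

	if err := k.UpgradeKubernetesVersion(ctx, kubernetesVersion, false); err != nil {
		var invalidUpgrade *compatibility.InvalidUpgradeError
		if !errors.As(err, &invalidUpgrade) {
			return err
		}
		log.Printf("skipping Kubernetes upgrade: %v", err)
	}
	return nil
}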
|
||||
// ClusterStatus returns a map from node name to NodeStatus.
|
||||
@ -203,8 +186,8 @@ func (k *KubeCmd) ClusterStatus(ctx context.Context) (map[string]NodeStatus, err
|
||||
return clusterStatus, nil
|
||||
}
|
||||
|
||||
// GetClusterAttestationConfig fetches the join-config configmap from the cluster, extracts the config
|
||||
// and returns both the full configmap and the attestation config.
|
||||
// GetClusterAttestationConfig fetches the join-config configmap from the cluster,
|
||||
// and returns the attestation config.
|
||||
func (k *KubeCmd) GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error) {
|
||||
existingConf, err := retryGetJoinConfig(ctx, k.kubectl, k.retryInterval, k.log)
|
||||
if err != nil {
|
||||
@ -262,7 +245,7 @@ func (k *KubeCmd) ApplyJoinConfig(ctx context.Context, newAttestConfig config.At
|
||||
}
|
||||
|
||||
// ExtendClusterConfigCertSANs extends the ClusterConfig stored under "kube-system/kubeadm-config" with the given SANs.
|
||||
// Existing SANs are preserved.
|
||||
// Empty strings are ignored, existing SANs are preserved.
|
||||
func (k *KubeCmd) ExtendClusterConfigCertSANs(ctx context.Context, alternativeNames []string) error {
|
||||
clusterConfiguration, kubeadmConfig, err := k.getClusterConfiguration(ctx)
|
||||
if err != nil {
|
||||
@ -305,7 +288,7 @@ func (k *KubeCmd) ExtendClusterConfigCertSANs(ctx context.Context, alternativeNa
|
||||
return fmt.Errorf("setting new kubeadm config: %w", err)
|
||||
}
|
||||
|
||||
fmt.Fprintln(k.outWriter, "Successfully extended the cluster's apiserver SAN field")
|
||||
k.log.Debugf("Successfully extended the cluster's apiserver SAN field")
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -434,7 +417,7 @@ func (k *KubeCmd) reconcileKubeadmConfigMap(ctx context.Context) error {
|
||||
return fmt.Errorf("setting new kubeadm config: %w", err)
|
||||
}
|
||||
|
||||
fmt.Fprintln(k.outWriter, "Successfully reconciled the cluster's kubeadm config")
|
||||
k.log.Debugf("Successfully reconciled the cluster's kubeadm config")
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -565,11 +548,3 @@ type kubectlInterface interface {
|
||||
type debugLog interface {
|
||||
Debugf(format string, args ...any)
|
||||
}
|
||||
|
||||
// imageFetcher gets an image reference from the versionsapi.
|
||||
type imageFetcher interface {
|
||||
FetchReference(ctx context.Context,
|
||||
provider cloudprovider.Provider, attestationVariant variant.Variant,
|
||||
image, region string,
|
||||
) (string, error)
|
||||
}
|
@ -10,18 +10,17 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
|
||||
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
|
||||
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
|
||||
"github.com/edgelesssys/constellation/v2/internal/compatibility"
|
||||
"github.com/edgelesssys/constellation/v2/internal/config"
|
||||
"github.com/edgelesssys/constellation/v2/internal/constants"
|
||||
"github.com/edgelesssys/constellation/v2/internal/logger"
|
||||
"github.com/edgelesssys/constellation/v2/internal/semver"
|
||||
"github.com/edgelesssys/constellation/v2/internal/versions"
|
||||
"github.com/edgelesssys/constellation/v2/internal/versions/components"
|
||||
updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
|
||||
@ -38,7 +37,7 @@ import (
|
||||
kubeadmv1beta3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
|
||||
)
|
||||
|
||||
func TestUpgradeNodeVersion(t *testing.T) {
|
||||
func TestUpgradeNodeImage(t *testing.T) {
|
||||
clusterConf := kubeadmv1beta3.ClusterConfiguration{
|
||||
APIServer: kubeadmv1beta3.APIServer{
|
||||
ControlPlaneComponent: kubeadmv1beta3.ControlPlaneComponent{
|
||||
@ -91,286 +90,99 @@ func TestUpgradeNodeVersion(t *testing.T) {
|
||||
}
|
||||
|
||||
testCases := map[string]struct {
|
||||
kubectl *stubKubectl
|
||||
conditions []metav1.Condition
|
||||
currentImageVersion string
|
||||
newImageReference string
|
||||
badImageVersion string
|
||||
currentClusterVersion versions.ValidK8sVersion
|
||||
conf *config.Config
|
||||
force bool
|
||||
getCRErr error
|
||||
wantErr bool
|
||||
wantUpdate bool
|
||||
assertCorrectError func(t *testing.T, err error) bool
|
||||
customClientFn func(nodeVersion updatev1alpha1.NodeVersion) unstructuredInterface
|
||||
conditions []metav1.Condition
|
||||
currentImageVersion semver.Semver
|
||||
newImageVersion semver.Semver
|
||||
badImageVersion string
|
||||
force bool
|
||||
customKubeadmConfig *corev1.ConfigMap
|
||||
getCRErr error
|
||||
wantErr bool
|
||||
wantUpdate bool
|
||||
assertCorrectError func(t *testing.T, err error) bool
|
||||
customClientFn func(nodeVersion updatev1alpha1.NodeVersion) unstructuredInterface
|
||||
}{
|
||||
"success": {
|
||||
conf: func() *config.Config {
|
||||
conf := config.Default()
|
||||
conf.Image = "v1.2.3"
|
||||
conf.KubernetesVersion = supportedValidK8sVersions()[1]
|
||||
return conf
|
||||
}(),
|
||||
currentImageVersion: "v1.2.2",
|
||||
currentClusterVersion: supportedValidK8sVersions()[0],
|
||||
kubectl: &stubKubectl{
|
||||
configMaps: map[string]*corev1.ConfigMap{
|
||||
constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
|
||||
constants.KubeadmConfigMap: validKubeadmConfig,
|
||||
},
|
||||
},
|
||||
wantUpdate: true,
|
||||
},
|
||||
"success with konnectivity migration": {
|
||||
conf: func() *config.Config {
|
||||
conf := config.Default()
|
||||
conf.Image = "v1.2.3"
|
||||
conf.KubernetesVersion = supportedValidK8sVersions()[1]
|
||||
return conf
|
||||
}(),
|
||||
currentImageVersion: "v1.2.2",
|
||||
currentClusterVersion: supportedValidK8sVersions()[0],
|
||||
kubectl: &stubKubectl{
|
||||
configMaps: map[string]*corev1.ConfigMap{
|
||||
constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
|
||||
constants.KubeadmConfigMap: validKubeadmConfigWithKonnectivity,
|
||||
},
|
||||
},
|
||||
wantUpdate: true,
|
||||
currentImageVersion: semver.NewFromInt(1, 2, 2, ""),
|
||||
newImageVersion: semver.NewFromInt(1, 2, 3, ""),
|
||||
customKubeadmConfig: validKubeadmConfigWithKonnectivity,
|
||||
wantUpdate: true,
|
||||
},
|
||||
"only k8s upgrade": {
|
||||
conf: func() *config.Config {
|
||||
conf := config.Default()
|
||||
conf.Image = "v1.2.2"
|
||||
conf.KubernetesVersion = supportedValidK8sVersions()[1]
|
||||
return conf
|
||||
}(),
|
||||
currentImageVersion: "v1.2.2",
|
||||
currentClusterVersion: supportedValidK8sVersions()[0],
|
||||
kubectl: &stubKubectl{
|
||||
configMaps: map[string]*corev1.ConfigMap{
|
||||
constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
|
||||
constants.KubeadmConfigMap: validKubeadmConfig,
|
||||
},
|
||||
},
|
||||
wantUpdate: true,
|
||||
wantErr: true,
|
||||
assertCorrectError: func(t *testing.T, err error) bool {
|
||||
var upgradeErr *compatibility.InvalidUpgradeError
|
||||
return assert.ErrorAs(t, err, &upgradeErr)
|
||||
},
|
||||
},
|
||||
"only image upgrade": {
|
||||
conf: func() *config.Config {
|
||||
conf := config.Default()
|
||||
conf.Image = "v1.2.3"
|
||||
conf.KubernetesVersion = supportedValidK8sVersions()[0]
|
||||
return conf
|
||||
}(),
|
||||
currentImageVersion: "v1.2.2",
|
||||
currentClusterVersion: supportedValidK8sVersions()[0],
|
||||
kubectl: &stubKubectl{
|
||||
configMaps: map[string]*corev1.ConfigMap{
|
||||
constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
|
||||
constants.KubeadmConfigMap: validKubeadmConfig,
|
||||
},
|
||||
},
|
||||
wantUpdate: true,
|
||||
wantErr: true,
|
||||
assertCorrectError: func(t *testing.T, err error) bool {
|
||||
var upgradeErr *compatibility.InvalidUpgradeError
|
||||
return assert.ErrorAs(t, err, &upgradeErr)
|
||||
},
|
||||
"success": {
|
||||
currentImageVersion: semver.NewFromInt(1, 2, 2, ""),
|
||||
newImageVersion: semver.NewFromInt(1, 2, 3, ""),
|
||||
wantUpdate: true,
|
||||
},
|
||||
"not an upgrade": {
|
||||
conf: func() *config.Config {
|
||||
conf := config.Default()
|
||||
conf.Image = "v1.2.2"
|
||||
conf.KubernetesVersion = supportedValidK8sVersions()[0]
|
||||
return conf
|
||||
}(),
|
||||
currentImageVersion: "v1.2.2",
|
||||
currentClusterVersion: supportedValidK8sVersions()[0],
|
||||
kubectl: &stubKubectl{
|
||||
configMaps: map[string]*corev1.ConfigMap{
|
||||
constants.KubeadmConfigMap: validKubeadmConfig,
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
currentImageVersion: semver.NewFromInt(1, 2, 2, ""),
|
||||
newImageVersion: semver.NewFromInt(1, 2, 2, ""),
|
||||
wantErr: true,
|
||||
assertCorrectError: func(t *testing.T, err error) bool {
|
||||
var upgradeErr *compatibility.InvalidUpgradeError
|
||||
return assert.ErrorAs(t, err, &upgradeErr)
|
||||
},
|
||||
},
|
||||
"upgrade in progress": {
|
||||
conf: func() *config.Config {
|
||||
conf := config.Default()
|
||||
conf.Image = "v1.2.3"
|
||||
conf.KubernetesVersion = supportedValidK8sVersions()[1]
|
||||
return conf
|
||||
}(),
|
||||
conditions: []metav1.Condition{{
|
||||
Type: updatev1alpha1.ConditionOutdated,
|
||||
Status: metav1.ConditionTrue,
|
||||
}},
|
||||
currentImageVersion: "v1.2.2",
|
||||
currentClusterVersion: supportedValidK8sVersions()[0],
|
||||
kubectl: &stubKubectl{
|
||||
configMaps: map[string]*corev1.ConfigMap{
|
||||
constants.KubeadmConfigMap: validKubeadmConfig,
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
currentImageVersion: semver.NewFromInt(1, 2, 2, ""),
|
||||
newImageVersion: semver.NewFromInt(1, 2, 3, ""),
|
||||
wantErr: true,
|
||||
assertCorrectError: func(t *testing.T, err error) bool {
|
||||
return assert.ErrorIs(t, err, ErrInProgress)
|
||||
},
|
||||
},
|
||||
"success with force and upgrade in progress": {
|
||||
conf: func() *config.Config {
|
||||
conf := config.Default()
|
||||
conf.Image = "v1.2.3"
|
||||
conf.KubernetesVersion = supportedValidK8sVersions()[1]
|
||||
return conf
|
||||
}(),
|
||||
conditions: []metav1.Condition{{
|
||||
Type: updatev1alpha1.ConditionOutdated,
|
||||
Status: metav1.ConditionTrue,
|
||||
}},
|
||||
currentImageVersion: "v1.2.2",
|
||||
currentClusterVersion: supportedValidK8sVersions()[0],
|
||||
kubectl: &stubKubectl{
|
||||
configMaps: map[string]*corev1.ConfigMap{
|
||||
constants.KubeadmConfigMap: validKubeadmConfig,
|
||||
},
|
||||
},
|
||||
force: true,
|
||||
wantUpdate: true,
|
||||
currentImageVersion: semver.NewFromInt(1, 2, 2, ""),
|
||||
newImageVersion: semver.NewFromInt(1, 2, 3, ""),
|
||||
force: true,
|
||||
wantUpdate: true,
|
||||
},
|
||||
"get error": {
|
||||
conf: func() *config.Config {
|
||||
conf := config.Default()
|
||||
conf.Image = "v1.2.3"
|
||||
conf.KubernetesVersion = supportedValidK8sVersions()[1]
|
||||
return conf
|
||||
}(),
|
||||
currentImageVersion: "v1.2.2",
|
||||
currentClusterVersion: supportedValidK8sVersions()[0],
|
||||
kubectl: &stubKubectl{
|
||||
configMaps: map[string]*corev1.ConfigMap{
|
||||
constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
|
||||
constants.KubeadmConfigMap: validKubeadmConfig,
|
||||
},
|
||||
},
|
||||
getCRErr: assert.AnError,
|
||||
wantErr: true,
|
||||
currentImageVersion: semver.NewFromInt(1, 2, 2, ""),
|
||||
newImageVersion: semver.NewFromInt(1, 2, 3, ""),
|
||||
getCRErr: assert.AnError,
|
||||
wantErr: true,
|
||||
assertCorrectError: func(t *testing.T, err error) bool {
|
||||
return assert.ErrorIs(t, err, assert.AnError)
|
||||
},
|
||||
},
|
||||
"image too new valid k8s": {
|
||||
conf: func() *config.Config {
|
||||
conf := config.Default()
|
||||
conf.Image = "v1.4.2"
|
||||
conf.KubernetesVersion = supportedValidK8sVersions()[1]
|
||||
return conf
|
||||
}(),
|
||||
newImageReference: "path/to/image:v1.4.2",
|
||||
currentImageVersion: "v1.2.2",
|
||||
currentClusterVersion: supportedValidK8sVersions()[0],
|
||||
kubectl: &stubKubectl{
|
||||
configMaps: map[string]*corev1.ConfigMap{
|
||||
constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":true}}`),
|
||||
constants.KubeadmConfigMap: validKubeadmConfig,
|
||||
},
|
||||
},
|
||||
wantUpdate: true,
|
||||
wantErr: true,
|
||||
"image too new": {
|
||||
currentImageVersion: semver.NewFromInt(1, 2, 2, ""),
|
||||
newImageVersion: semver.NewFromInt(1, 4, 3, ""),
|
||||
wantErr: true,
|
||||
assertCorrectError: func(t *testing.T, err error) bool {
|
||||
var upgradeErr *compatibility.InvalidUpgradeError
|
||||
return assert.ErrorAs(t, err, &upgradeErr)
|
||||
},
|
||||
},
|
||||
"success with force and image too new": {
|
||||
conf: func() *config.Config {
|
||||
conf := config.Default()
|
||||
conf.Image = "v1.4.2"
|
||||
conf.KubernetesVersion = supportedValidK8sVersions()[1]
|
||||
return conf
|
||||
}(),
|
||||
newImageReference: "path/to/image:v1.4.2",
|
||||
currentImageVersion: "v1.2.2",
|
||||
currentClusterVersion: supportedValidK8sVersions()[0],
|
||||
kubectl: &stubKubectl{
|
||||
configMaps: map[string]*corev1.ConfigMap{
|
||||
constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
|
||||
constants.KubeadmConfigMap: validKubeadmConfig,
|
||||
},
|
||||
},
|
||||
wantUpdate: true,
|
||||
force: true,
|
||||
currentImageVersion: semver.NewFromInt(1, 2, 2, ""),
|
||||
newImageVersion: semver.NewFromInt(1, 2, 4, ""),
|
||||
wantUpdate: true,
|
||||
force: true,
|
||||
},
|
||||
"apply returns bad object": {
|
||||
conf: func() *config.Config {
|
||||
conf := config.Default()
|
||||
conf.Image = "v1.2.3"
|
||||
conf.KubernetesVersion = supportedValidK8sVersions()[1]
|
||||
return conf
|
||||
}(),
|
||||
currentImageVersion: "v1.2.2",
|
||||
currentClusterVersion: supportedValidK8sVersions()[0],
|
||||
badImageVersion: "v3.2.1",
|
||||
kubectl: &stubKubectl{
|
||||
configMaps: map[string]*corev1.ConfigMap{
|
||||
constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
|
||||
constants.KubeadmConfigMap: validKubeadmConfig,
|
||||
},
|
||||
},
|
||||
wantUpdate: true,
|
||||
wantErr: true,
|
||||
currentImageVersion: semver.NewFromInt(1, 2, 2, ""),
|
||||
newImageVersion: semver.NewFromInt(1, 2, 3, ""),
|
||||
badImageVersion: "v3.2.1",
|
||||
wantUpdate: true,
|
||||
wantErr: true,
|
||||
assertCorrectError: func(t *testing.T, err error) bool {
|
||||
var target *applyError
|
||||
return assert.ErrorAs(t, err, &target)
|
||||
},
|
||||
},
|
||||
"outdated k8s version skips k8s upgrade": {
|
||||
conf: func() *config.Config {
|
||||
conf := config.Default()
|
||||
conf.Image = "v1.2.2"
|
||||
conf.KubernetesVersion = "v1.25.8"
|
||||
return conf
|
||||
}(),
|
||||
currentImageVersion: "v1.2.2",
|
||||
currentClusterVersion: supportedValidK8sVersions()[0],
|
||||
kubectl: &stubKubectl{
|
||||
configMaps: map[string]*corev1.ConfigMap{
|
||||
constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
|
||||
constants.KubeadmConfigMap: validKubeadmConfig,
|
||||
},
|
||||
},
|
||||
wantUpdate: false,
|
||||
wantErr: true,
|
||||
assertCorrectError: func(t *testing.T, err error) bool {
|
||||
var upgradeErr *compatibility.InvalidUpgradeError
|
||||
return assert.ErrorAs(t, err, &upgradeErr)
|
||||
},
|
||||
},
|
||||
"succeed after update retry when the updated node object is outdated": {
|
||||
conf: func() *config.Config {
|
||||
conf := config.Default()
|
||||
conf.Image = "v1.2.3"
|
||||
conf.KubernetesVersion = supportedValidK8sVersions()[1]
|
||||
return conf
|
||||
}(),
|
||||
currentImageVersion: "v1.2.2",
|
||||
currentClusterVersion: supportedValidK8sVersions()[0],
|
||||
kubectl: &stubKubectl{
|
||||
configMaps: map[string]*corev1.ConfigMap{
|
||||
constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
|
||||
constants.KubeadmConfigMap: validKubeadmConfig,
|
||||
},
|
||||
},
|
||||
wantUpdate: false, // because customClient is used
|
||||
currentImageVersion: semver.NewFromInt(1, 2, 2, ""),
|
||||
newImageVersion: semver.NewFromInt(1, 2, 3, ""),
|
||||
wantUpdate: false, // because customClient is used
|
||||
customClientFn: func(nodeVersion updatev1alpha1.NodeVersion) unstructuredInterface {
|
||||
fakeClient := &fakeUnstructuredClient{}
|
||||
fakeClient.On("GetCR", mock.Anything, mock.Anything).Return(unstructedObjectWithGeneration(nodeVersion, 1), nil)
|
||||
@ -388,13 +200,15 @@ func TestUpgradeNodeVersion(t *testing.T) {
|
||||
|
||||
nodeVersion := updatev1alpha1.NodeVersion{
|
||||
Spec: updatev1alpha1.NodeVersionSpec{
|
||||
ImageVersion: tc.currentImageVersion,
|
||||
KubernetesClusterVersion: string(tc.currentClusterVersion),
|
||||
ImageVersion: tc.currentImageVersion.String(),
|
||||
KubernetesClusterVersion: "v1.2.3",
|
||||
},
|
||||
Status: updatev1alpha1.NodeVersionStatus{
|
||||
Conditions: tc.conditions,
|
||||
},
|
||||
}
|
||||
unstrNodeVersion, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
|
||||
require.NoError(err)
|
||||
|
||||
var badUpdatedObject *unstructured.Unstructured
|
||||
if tc.badImageVersion != "" {
|
||||
@ -404,26 +218,30 @@ func TestUpgradeNodeVersion(t *testing.T) {
|
||||
badUpdatedObject = &unstructured.Unstructured{Object: unstrBadNodeVersion}
|
||||
}
|
||||
|
||||
unstrNodeVersion, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
|
||||
require.NoError(err)
|
||||
unstructuredClient := &stubUnstructuredClient{
|
||||
object: &unstructured.Unstructured{Object: unstrNodeVersion},
|
||||
badUpdatedObject: badUpdatedObject,
|
||||
getCRErr: tc.getCRErr,
|
||||
}
|
||||
tc.kubectl.unstructuredInterface = unstructuredClient
|
||||
kubectl := &stubKubectl{
|
||||
unstructuredInterface: unstructuredClient,
|
||||
configMaps: map[string]*corev1.ConfigMap{
|
||||
constants.KubeadmConfigMap: validKubeadmConfig,
|
||||
},
|
||||
}
|
||||
if tc.customClientFn != nil {
|
||||
tc.kubectl.unstructuredInterface = tc.customClientFn(nodeVersion)
|
||||
kubectl.unstructuredInterface = tc.customClientFn(nodeVersion)
|
||||
}
|
||||
if tc.customKubeadmConfig != nil {
|
||||
kubectl.configMaps[constants.KubeadmConfigMap] = tc.customKubeadmConfig
|
||||
}
|
||||
|
||||
upgrader := KubeCmd{
|
||||
kubectl: tc.kubectl,
|
||||
imageFetcher: &stubImageFetcher{reference: tc.newImageReference},
|
||||
log: logger.NewTest(t),
|
||||
outWriter: io.Discard,
|
||||
kubectl: kubectl,
|
||||
log: logger.NewTest(t),
|
||||
}
|
||||
|
||||
err = upgrader.UpgradeNodeVersion(context.Background(), tc.conf, tc.force, false, false)
|
||||
err = upgrader.UpgradeNodeImage(context.Background(), tc.newImageVersion, fmt.Sprintf("/path/to/image:%s", tc.newImageVersion.String()), tc.force)
|
||||
// Check upgrades first because if we checked err first, UpgradeImage may error due to other reasons and still trigger an upgrade.
|
||||
if tc.wantUpdate {
|
||||
assert.NotNil(unstructuredClient.updatedObject)
|
||||
@ -437,17 +255,128 @@ func TestUpgradeNodeVersion(t *testing.T) {
|
||||
return
|
||||
}
|
||||
assert.NoError(err)
|
||||
// The ConfigMap only exists in the updatedConfigMaps map it needed to remove the Konnectivity values
|
||||
if strings.Contains(tc.kubectl.configMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "konnectivity-uds") {
|
||||
assert.NotContains(tc.kubectl.updatedConfigMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "konnectivity-uds")
|
||||
assert.NotContains(tc.kubectl.updatedConfigMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "egress-config")
|
||||
assert.NotContains(tc.kubectl.updatedConfigMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "egress-selector-config-file")
|
||||
// If the ConfigMap only exists in the updatedConfigMaps map, the Konnectivity values should have been removed
|
||||
if strings.Contains(kubectl.configMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "konnectivity-uds") {
|
||||
assert.NotContains(kubectl.updatedConfigMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "konnectivity-uds")
|
||||
assert.NotContains(kubectl.updatedConfigMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "egress-config")
|
||||
assert.NotContains(kubectl.updatedConfigMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "egress-selector-config-file")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateImage(t *testing.T) {
|
||||
func TestUpgradeKubernetesVersion(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
conditions []metav1.Condition
|
||||
newKubernetesVersion versions.ValidK8sVersion
|
||||
currentKubernetesVersion versions.ValidK8sVersion
|
||||
force bool
|
||||
getCRErr error
|
||||
wantErr bool
|
||||
wantUpdate bool
|
||||
assertCorrectError func(t *testing.T, err error) bool
|
||||
customClientFn func(nodeVersion updatev1alpha1.NodeVersion) unstructuredInterface
|
||||
}{
|
||||
"success": {
|
||||
currentKubernetesVersion: supportedValidK8sVersions()[0],
|
||||
newKubernetesVersion: supportedValidK8sVersions()[1],
|
||||
wantUpdate: true,
|
||||
wantErr: false,
|
||||
},
|
||||
"not an upgrade": {
|
||||
currentKubernetesVersion: supportedValidK8sVersions()[0],
|
||||
newKubernetesVersion: supportedValidK8sVersions()[0],
|
||||
wantErr: true,
|
||||
assertCorrectError: func(t *testing.T, err error) bool {
|
||||
var upgradeErr *compatibility.InvalidUpgradeError
|
||||
return assert.ErrorAs(t, err, &upgradeErr)
|
||||
},
|
||||
},
|
||||
"get error": {
|
||||
currentKubernetesVersion: supportedValidK8sVersions()[0],
|
||||
newKubernetesVersion: supportedValidK8sVersions()[1],
|
||||
getCRErr: assert.AnError,
|
||||
wantErr: true,
|
||||
assertCorrectError: func(t *testing.T, err error) bool {
|
||||
return assert.ErrorIs(t, err, assert.AnError)
|
||||
},
|
||||
},
|
||||
"outdated k8s version skips k8s upgrade": {
|
||||
currentKubernetesVersion: supportedValidK8sVersions()[0],
|
||||
newKubernetesVersion: versions.ValidK8sVersion("v1.1.0"),
|
||||
wantUpdate: false,
|
||||
wantErr: true,
|
||||
assertCorrectError: func(t *testing.T, err error) bool {
|
||||
var upgradeErr *compatibility.InvalidUpgradeError
|
||||
return assert.ErrorAs(t, err, &upgradeErr)
|
||||
},
|
||||
},
|
||||
"succeed after update retry when the updated node object is outdated": {
|
||||
currentKubernetesVersion: supportedValidK8sVersions()[0],
|
||||
newKubernetesVersion: supportedValidK8sVersions()[1],
|
||||
wantUpdate: false, // because customClient is used
|
||||
customClientFn: func(nodeVersion updatev1alpha1.NodeVersion) unstructuredInterface {
|
||||
fakeClient := &fakeUnstructuredClient{}
|
||||
fakeClient.On("GetCR", mock.Anything, mock.Anything).Return(unstructedObjectWithGeneration(nodeVersion, 1), nil)
|
||||
fakeClient.On("UpdateCR", mock.Anything, mock.Anything).Return(nil, k8serrors.NewConflict(schema.GroupResource{Resource: nodeVersion.Name}, nodeVersion.Name, nil)).Once()
|
||||
fakeClient.On("UpdateCR", mock.Anything, mock.Anything).Return(unstructedObjectWithGeneration(nodeVersion, 2), nil).Once()
|
||||
return fakeClient
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
nodeVersion := updatev1alpha1.NodeVersion{
|
||||
Spec: updatev1alpha1.NodeVersionSpec{
|
||||
ImageVersion: "v1.2.3",
|
||||
KubernetesClusterVersion: string(tc.currentKubernetesVersion),
|
||||
},
|
||||
Status: updatev1alpha1.NodeVersionStatus{
|
||||
Conditions: tc.conditions,
|
||||
},
|
||||
}
|
||||
|
||||
unstrNodeVersion, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
|
||||
require.NoError(err)
|
||||
unstructuredClient := &stubUnstructuredClient{
|
||||
object: &unstructured.Unstructured{Object: unstrNodeVersion},
|
||||
getCRErr: tc.getCRErr,
|
||||
}
|
||||
kubectl := &stubKubectl{
|
||||
unstructuredInterface: unstructuredClient,
|
||||
}
|
||||
if tc.customClientFn != nil {
|
||||
kubectl.unstructuredInterface = tc.customClientFn(nodeVersion)
|
||||
}
|
||||
|
||||
upgrader := KubeCmd{
|
||||
kubectl: kubectl,
|
||||
log: logger.NewTest(t),
|
||||
}
|
||||
|
||||
err = upgrader.UpgradeKubernetesVersion(context.Background(), tc.newKubernetesVersion, tc.force)
|
||||
// Check upgrades first because if we checked err first, UpgradeImage may error due to other reasons and still trigger an upgrade.
|
||||
if tc.wantUpdate {
|
||||
assert.NotNil(unstructuredClient.updatedObject)
|
||||
} else {
|
||||
assert.Nil(unstructuredClient.updatedObject)
|
||||
}
|
||||
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
tc.assertCorrectError(t, err)
|
||||
return
|
||||
}
|
||||
assert.NoError(err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsValidImageUpgrade(t *testing.T) {
|
||||
someErr := errors.New("error")
|
||||
testCases := map[string]struct {
|
||||
newImageReference string
|
||||
@ -729,7 +658,6 @@ func TestApplyJoinConfig(t *testing.T) {
|
||||
kubectl: tc.kubectl,
|
||||
log: logger.NewTest(t),
|
||||
retryInterval: time.Millisecond,
|
||||
outWriter: io.Discard,
|
||||
}
|
||||
|
||||
err := cmd.ApplyJoinConfig(context.Background(), tc.newAttestationCfg, []byte{0x11})
|
||||
@ -839,18 +767,6 @@ func (s *stubKubectl) GetNodes(_ context.Context) ([]corev1.Node, error) {
|
||||
return s.nodes, s.nodesErr
|
||||
}
|
||||
|
||||
type stubImageFetcher struct {
|
||||
reference string
|
||||
fetchReferenceErr error
|
||||
}
|
||||
|
||||
func (f *stubImageFetcher) FetchReference(_ context.Context,
|
||||
_ cloudprovider.Provider, _ variant.Variant,
|
||||
_, _ string,
|
||||
) (string, error) {
|
||||
return f.reference, f.fetchReferenceErr
|
||||
}
|
||||
|
||||
func unstructedObjectWithGeneration(nodeVersion updatev1alpha1.NodeVersion, generation int64) *unstructured.Unstructured {
|
||||
unstrNodeVersion, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
|
||||
object := &unstructured.Unstructured{Object: unstrNodeVersion}
|
89 internal/constellation/kubernetes.go Normal file
@ -0,0 +1,89 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package constellation

import (
    "context"
    "errors"
    "fmt"

    "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
    "github.com/edgelesssys/constellation/v2/internal/config"
    "github.com/edgelesssys/constellation/v2/internal/file"
    "github.com/edgelesssys/constellation/v2/internal/semver"
    "github.com/edgelesssys/constellation/v2/internal/versions"
    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

var errKubecmdNotInitialised = errors.New("kubernetes client not initialized")

// ExtendClusterConfigCertSANs extends the ClusterConfig stored under "kube-system/kubeadm-config" with the given SANs.
func (a *Applier) ExtendClusterConfigCertSANs(ctx context.Context, clusterEndpoint, customEndpoint string, additionalAPIServerCertSANs []string) error {
    if a.kubecmdClient == nil {
        return errKubecmdNotInitialised
    }

    sans := append([]string{clusterEndpoint, customEndpoint}, additionalAPIServerCertSANs...)
    if err := a.kubecmdClient.ExtendClusterConfigCertSANs(ctx, sans); err != nil {
        return fmt.Errorf("extending cert SANs: %w", err)
    }
    return nil
}

// GetClusterAttestationConfig returns the attestation config currently set for the cluster.
func (a *Applier) GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error) {
    if a.kubecmdClient == nil {
        return nil, errKubecmdNotInitialised
    }

    return a.kubecmdClient.GetClusterAttestationConfig(ctx, variant)
}

// ApplyJoinConfig creates or updates the Constellation cluster's join-config ConfigMap.
func (a *Applier) ApplyJoinConfig(ctx context.Context, newAttestConfig config.AttestationCfg, measurementSalt []byte) error {
    if a.kubecmdClient == nil {
        return errKubecmdNotInitialised
    }

    return a.kubecmdClient.ApplyJoinConfig(ctx, newAttestConfig, measurementSalt)
}

// UpgradeNodeImage upgrades the node image of the cluster to the given version.
func (a *Applier) UpgradeNodeImage(ctx context.Context, imageVersion semver.Semver, imageReference string, force bool) error {
    if a.kubecmdClient == nil {
        return errKubecmdNotInitialised
    }

    return a.kubecmdClient.UpgradeNodeImage(ctx, imageVersion, imageReference, force)
}

// UpgradeKubernetesVersion upgrades the Kubernetes version of the cluster to the given version.
func (a *Applier) UpgradeKubernetesVersion(ctx context.Context, kubernetesVersion versions.ValidK8sVersion, force bool) error {
    if a.kubecmdClient == nil {
        return errKubecmdNotInitialised
    }

    return a.kubecmdClient.UpgradeKubernetesVersion(ctx, kubernetesVersion, force)
}

// BackupCRDs backs up all CRDs to the upgrade workspace.
func (a *Applier) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgradeDir string) ([]apiextensionsv1.CustomResourceDefinition, error) {
    if a.kubecmdClient == nil {
        return nil, errKubecmdNotInitialised
    }

    return a.kubecmdClient.BackupCRDs(ctx, fileHandler, upgradeDir)
}

// BackupCRs backs up all CRs to the upgrade workspace.
func (a *Applier) BackupCRs(ctx context.Context, fileHandler file.Handler, crds []apiextensionsv1.CustomResourceDefinition, upgradeDir string) error {
    if a.kubecmdClient == nil {
        return errKubecmdNotInitialised
    }

    return a.kubecmdClient.BackupCRs(ctx, fileHandler, crds, upgradeDir)
}
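The nil checks above make every wrapper safe to call even before SetKubeConfig has run; it simply fails fast with the not-initialised error. A small hedged illustration of that contract, with placeholder endpoint values:

package example

import (
	"context"
	"fmt"

	"github.com/edgelesssys/constellation/v2/internal/constellation"
)

// demoGuard illustrates the fail-fast contract of the wrappers: before
// SetKubeConfig is called, a Kubernetes-backed method returns the
// "kubernetes client not initialized" error instead of panicking.
func demoGuard(ctx context.Context, applier *constellation.Applier, kubeConfig []byte) error {
	// Called too early: kubecmdClient is still nil, so this returns an error.
	if err := applier.ExtendClusterConfigCertSANs(ctx, "203.0.113.1", "", nil); err != nil {
		fmt.Println("expected before initialisation:", err)
	}

	if err := applier.SetKubeConfig(kubeConfig); err != nil {
		return err
	}
	// Now the same call is forwarded to the kubecmd client.
	return applier.ExtendClusterConfigCertSANs(ctx, "203.0.113.1", "custom.example.com", nil)
}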