cli: restructure upgrade apply (#1319)

Applies the updated NodeVersion object with one request instead of two. This makes sure that the first request does not accidentally put the cluster into an "upgrade in progress" status, which would otherwise force users to run apply twice.
edgelesssys/constellation · commit f0db5d0395 · parent 57d675c819
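The intent of the restructure, as a minimal sketch before the diff (hypothetical NodeVersionSpec, client, and upgrade names — the actual implementation is the UpgradeNodeVersion/applyUpgrade code in the hunks below): all spec fields are updated on a local copy first, and a single update request then sends the complete object.

package upgrade

import (
    "context"
    "errors"
    "fmt"
)

// NodeVersionSpec is a simplified stand-in for the node operator's NodeVersion spec.
type NodeVersionSpec struct {
    ImageReference           string
    ImageVersion             string
    KubernetesClusterVersion string
}

// client abstracts the Kubernetes API; update sends the whole object in one request.
type client interface {
    update(ctx context.Context, spec NodeVersionSpec) (NodeVersionSpec, error)
}

// upgrade mutates a local copy of the spec for both the image and the Kubernetes
// version, then applies it with a single request. The previous flow issued one
// request per concern, so the first request could already flip the cluster into an
// "upgrade in progress" state and the second would be rejected, forcing a second
// apply run.
func upgrade(ctx context.Context, c client, current NodeVersionSpec, newImageRef, newImageVer, newK8sVer string) error {
    current.ImageReference = newImageRef
    current.ImageVersion = newImageVer
    current.KubernetesClusterVersion = newK8sVer

    updated, err := c.update(ctx, current) // one request for all fields
    if err != nil {
        return fmt.Errorf("updating NodeVersion: %w", err)
    }
    if updated != current {
        return errors.New("unexpected value in updated NodeVersion object")
    }
    return nil
}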
@@ -16,6 +16,11 @@ import (
     tfjson "github.com/hashicorp/terraform-json"
 )
 
+// imageFetcher gets an image reference from the versionsapi.
+type imageFetcher interface {
+    FetchReference(ctx context.Context, config *config.Config) (string, error)
+}
+
 type terraformClient interface {
     PrepareWorkspace(path string, input terraform.Variables) error
     CreateCluster(ctx context.Context) (terraform.CreateOutput, error)
@@ -31,10 +36,6 @@ type libvirtRunner interface {
     Stop(ctx context.Context) error
 }
 
-type imageFetcher interface {
-    FetchReference(ctx context.Context, config *config.Config) (string, error)
-}
-
 type rawDownloader interface {
     Download(ctx context.Context, errWriter io.Writer, isTTY bool, source, version string) (string, error)
 }
@@ -15,13 +15,16 @@ import (
     "time"
 
     "github.com/edgelesssys/constellation/v2/cli/internal/helm"
+    "github.com/edgelesssys/constellation/v2/cli/internal/image"
     "github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
     "github.com/edgelesssys/constellation/v2/internal/compatibility"
     "github.com/edgelesssys/constellation/v2/internal/config"
     "github.com/edgelesssys/constellation/v2/internal/constants"
     internalk8s "github.com/edgelesssys/constellation/v2/internal/kubernetes"
     "github.com/edgelesssys/constellation/v2/internal/kubernetes/kubectl"
+    "github.com/edgelesssys/constellation/v2/internal/versions"
     "github.com/edgelesssys/constellation/v2/internal/versions/components"
+    "github.com/edgelesssys/constellation/v2/internal/versionsapi"
     updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
     corev1 "k8s.io/api/core/v1"
     k8serrors "k8s.io/apimachinery/pkg/api/errors"
@@ -42,9 +45,9 @@ type Upgrader struct {
     stableInterface  stableInterface
     dynamicInterface dynamicInterface
     helmClient       helmInterface
+    imageFetcher     imageFetcher
     outWriter        io.Writer
     log              debugLog
 }
 
 // NewUpgrader returns a new Upgrader.
@@ -74,100 +77,145 @@ func NewUpgrader(outWriter io.Writer, log debugLog) (*Upgrader, error) {
     stableInterface:  &stableClient{client: kubeClient},
     dynamicInterface: &dynamicClient{client: unstructuredClient},
     helmClient:       helmClient,
+    imageFetcher:     image.New(),
     outWriter:        outWriter,
     log:              log,
 }, nil
 }
 
-// UpgradeImage upgrades the cluster to the given measurements and image.
-func (u *Upgrader) UpgradeImage(ctx context.Context, newImageReference, newImageVersion string, newMeasurements measurements.M) error {
-    nodeVersion, err := u.getConstellationVersion(ctx)
-    if err != nil {
-        return fmt.Errorf("retrieving current image: %w", err)
-    }
-    currentImageVersion := nodeVersion.Spec.ImageVersion
-
-    if err := compatibility.IsValidUpgrade(currentImageVersion, newImageVersion); err != nil {
-        return err
-    }
-
-    if imageUpgradeInProgress(nodeVersion) {
-        return ErrInProgress
-    }
-
-    if err := u.updateMeasurements(ctx, newMeasurements); err != nil {
-        return fmt.Errorf("updating measurements: %w", err)
-    }
-
-    if err := u.updateImage(ctx, nodeVersion, newImageReference, newImageVersion); err != nil {
-        return fmt.Errorf("updating image: %w", err)
-    }
-    return nil
-}
-
 // UpgradeHelmServices upgrade helm services.
 func (u *Upgrader) UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error {
     return u.helmClient.Upgrade(ctx, config, timeout, allowDestructive)
 }
 
-// UpgradeK8s upgrade the Kubernetes cluster version and the installed components to matching versions.
-func (u *Upgrader) UpgradeK8s(ctx context.Context, newClusterVersion string, components components.Components) error {
-    nodeVersion, err := u.getConstellationVersion(ctx)
+// UpgradeNodeVersion upgrades the cluster's NodeVersion object and in turn triggers image & k8s version upgrades.
+// The versions set in the config are validated against the versions running in the cluster.
+func (u *Upgrader) UpgradeNodeVersion(ctx context.Context, conf *config.Config) error {
+    imageReference, err := u.imageFetcher.FetchReference(ctx, conf)
     if err != nil {
-        return fmt.Errorf("getting kubernetesClusterVersion: %w", err)
+        return fmt.Errorf("fetching image reference: %w", err)
     }
 
-    if err := compatibility.IsValidUpgrade(nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion); err != nil {
+    imageVersion, err := versionsapi.NewVersionFromShortPath(conf.Image, versionsapi.VersionKindImage)
+    if err != nil {
+        return fmt.Errorf("parsing version from image short path: %w", err)
+    }
+
+    currentK8sVersion, err := versions.NewValidK8sVersion(conf.KubernetesVersion)
+    if err != nil {
+        return fmt.Errorf("getting Kubernetes version: %w", err)
+    }
+    versionConfig := versions.VersionConfigs[currentK8sVersion]
+
+    nodeVersion, err := u.checkClusterStatus(ctx)
+    if err != nil {
         return err
     }
 
-    if k8sUpgradeInProgress(nodeVersion) {
-        return ErrInProgress
+    upgradeErrs := []error{}
+    upgradeErr := &compatibility.InvalidUpgradeError{}
+    err = u.updateImage(&nodeVersion, imageReference, imageVersion.Version)
+    if errors.As(err, &upgradeErr) {
+        upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping image upgrades: %w", err))
     }
 
-    u.log.Debugf("Upgrading cluster's Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)
-    configMap, err := internalk8s.ConstructK8sComponentsCM(components, newClusterVersion)
+    components, err := u.updateK8s(&nodeVersion, versionConfig.ClusterVersion, versionConfig.KubernetesComponents)
+    if errors.As(err, &upgradeErr) {
+        upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping Kubernetes upgrades: %w", err))
+    }
+
+    if len(upgradeErrs) == 2 {
+        return errors.Join(upgradeErrs...)
+    }
+
+    if err := u.updateMeasurements(ctx, conf.GetMeasurements()); err != nil {
+        return fmt.Errorf("updating measurements: %w", err)
+    }
+    updatedNodeVersion, err := u.applyUpgrade(ctx, &components, nodeVersion)
     if err != nil {
-        return fmt.Errorf("constructing k8s-components ConfigMap: %w", err)
+        return fmt.Errorf("applying upgrade: %w", err)
+    }
+    if updatedNodeVersion.Spec.ImageReference != imageReference ||
+        updatedNodeVersion.Spec.ImageVersion != imageVersion.Version ||
+        updatedNodeVersion.Spec.KubernetesComponentsReference != components.ObjectMeta.Name ||
+        updatedNodeVersion.Spec.KubernetesClusterVersion != versionConfig.ClusterVersion {
+        return errors.New("unexpected value in updated nodeVersion object")
     }
 
-    _, err = u.stableInterface.createConfigMap(ctx, &configMap)
+    return errors.Join(upgradeErrs...)
+}
+
+func (u *Upgrader) applyUpgrade(ctx context.Context, components *corev1.ConfigMap, nodeVersion updatev1alpha1.NodeVersion) (updatev1alpha1.NodeVersion, error) {
+    _, err := u.stableInterface.createConfigMap(ctx, components)
     // If the map already exists we can use that map and assume it has the same content as 'configMap'.
     if err != nil && !k8serrors.IsAlreadyExists(err) {
-        return fmt.Errorf("creating k8s-components ConfigMap: %w. %T", err, err)
+        return updatev1alpha1.NodeVersion{}, fmt.Errorf("creating k8s-components ConfigMap: %w. %T", err, err)
     }
 
-    nodeVersion.Spec.KubernetesComponentsReference = configMap.ObjectMeta.Name
-    nodeVersion.Spec.KubernetesClusterVersion = newClusterVersion
-
     raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
     if err != nil {
-        return fmt.Errorf("converting nodeVersion to unstructured: %w", err)
+        return updatev1alpha1.NodeVersion{}, fmt.Errorf("converting nodeVersion to unstructured: %w", err)
     }
     u.log.Debugf("Triggering Kubernetes version upgrade now")
     // Send the updated NodeVersion resource
     updated, err := u.dynamicInterface.update(ctx, &unstructured.Unstructured{Object: raw})
     if err != nil {
-        return fmt.Errorf("updating NodeVersion: %w", err)
+        return updatev1alpha1.NodeVersion{}, fmt.Errorf("updating NodeVersion: %w", err)
     }
 
-    // Verify the update worked as expected
-    updatedSpec, ok := updated.Object["spec"]
-    if !ok {
-        return errors.New("invalid updated NodeVersion spec")
-    }
-    updatedMap, ok := updatedSpec.(map[string]any)
-    if !ok {
-        return errors.New("invalid updated NodeVersion spec")
-    }
-    if updatedMap["kubernetesComponentsReference"] != configMap.ObjectMeta.Name || updatedMap["kubernetesClusterVersion"] != newClusterVersion {
-        return errors.New("failed to update NodeVersion resource")
+    var updatedNodeVersion updatev1alpha1.NodeVersion
+    if err := runtime.DefaultUnstructuredConverter.FromUnstructured(updated.UnstructuredContent(), &updatedNodeVersion); err != nil {
+        return updatev1alpha1.NodeVersion{}, fmt.Errorf("converting unstructured to NodeVersion: %w", err)
     }
 
-    fmt.Fprintf(u.outWriter, "Successfully updated the cluster's Kubernetes version to %s\n", newClusterVersion)
+    return updatedNodeVersion, nil
+}
+
+func (u *Upgrader) checkClusterStatus(ctx context.Context) (updatev1alpha1.NodeVersion, error) {
+    nodeVersion, err := u.getConstellationVersion(ctx)
+    if err != nil {
+        return updatev1alpha1.NodeVersion{}, fmt.Errorf("retrieving current image: %w", err)
+    }
+
+    if upgradeInProgress(nodeVersion) {
+        return updatev1alpha1.NodeVersion{}, ErrInProgress
+    }
+
+    return nodeVersion, nil
+}
+
+// updateImage upgrades the cluster to the given measurements and image.
+func (u *Upgrader) updateImage(nodeVersion *updatev1alpha1.NodeVersion, newImageReference, newImageVersion string) error {
+    currentImageVersion := nodeVersion.Spec.ImageVersion
+
+    if err := compatibility.IsValidUpgrade(currentImageVersion, newImageVersion); err != nil {
+        return err
+    }
+
+    u.log.Debugf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, newImageVersion)
+    nodeVersion.Spec.ImageReference = newImageReference
+    nodeVersion.Spec.ImageVersion = newImageVersion
+
     return nil
 }
 
+func (u *Upgrader) updateK8s(nodeVersion *updatev1alpha1.NodeVersion, newClusterVersion string, components components.Components) (corev1.ConfigMap, error) {
+    configMap, err := internalk8s.ConstructK8sComponentsCM(components, newClusterVersion)
+    if err != nil {
+        return corev1.ConfigMap{}, fmt.Errorf("constructing k8s-components ConfigMap: %w", err)
+    }
+
+    if err := compatibility.IsValidUpgrade(nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion); err != nil {
+        return corev1.ConfigMap{}, err
+    }
+
+    u.log.Debugf("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)
+    nodeVersion.Spec.KubernetesComponentsReference = configMap.ObjectMeta.Name
+    nodeVersion.Spec.KubernetesClusterVersion = newClusterVersion
+
+    return configMap, nil
+}
+
 // KubernetesVersion returns the version of Kubernetes the Constellation is currently running on.
 func (u *Upgrader) KubernetesVersion() (string, error) {
     return u.stableInterface.kubernetesVersion()
@@ -247,27 +295,9 @@ func (u *Upgrader) updateMeasurements(ctx context.Context, newMeasurements measu
     return nil
 }
 
-func (u *Upgrader) updateImage(ctx context.Context, nodeVersion updatev1alpha1.NodeVersion, newImageRef, newImageVersion string) error {
-    u.log.Debugf("Upgrading cluster's image version from %s to %s", nodeVersion.Spec.ImageVersion, newImageVersion)
-    nodeVersion.Spec.ImageReference = newImageRef
-    nodeVersion.Spec.ImageVersion = newImageVersion
-
-    raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
-    if err != nil {
-        return fmt.Errorf("converting nodeVersion to unstructured: %w", err)
-    }
-    u.log.Debugf("Triggering image version upgrade now")
-    if _, err := u.dynamicInterface.update(ctx, &unstructured.Unstructured{Object: raw}); err != nil {
-        return fmt.Errorf("setting new image: %w", err)
-    }
-
-    fmt.Fprintf(u.outWriter, "Successfully updated the cluster's image version to %s\n", newImageVersion)
-    return nil
-}
-
-// k8sUpgradeInProgress checks if a k8s upgrade is in progress.
+// upgradeInProgress checks if an upgrade is in progress.
 // Returns true with errors as it's the "safer" response. If caller does not check err they at least won't update the cluster.
-func k8sUpgradeInProgress(nodeVersion updatev1alpha1.NodeVersion) bool {
+func upgradeInProgress(nodeVersion updatev1alpha1.NodeVersion) bool {
     conditions := nodeVersion.Status.Conditions
     activeUpgrade := nodeVersion.Status.ActiveClusterVersionUpgrade
@@ -283,15 +313,6 @@ func k8sUpgradeInProgress(nodeVersion updatev1alpha1.NodeVersion) bool {
     return false
 }
 
-func imageUpgradeInProgress(nodeVersion updatev1alpha1.NodeVersion) bool {
-    for _, condition := range nodeVersion.Status.Conditions {
-        if condition.Type == updatev1alpha1.ConditionOutdated && condition.Status == metav1.ConditionTrue {
-            return true
-        }
-    }
-    return false
-}
-
 type dynamicInterface interface {
     getCurrent(ctx context.Context, name string) (*unstructured.Unstructured, error)
     update(ctx context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error)
@@ -15,8 +15,10 @@ import (
 
     "github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
     "github.com/edgelesssys/constellation/v2/internal/compatibility"
+    "github.com/edgelesssys/constellation/v2/internal/config"
     "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/edgelesssys/constellation/v2/internal/logger"
+    "github.com/edgelesssys/constellation/v2/internal/versions"
     "github.com/edgelesssys/constellation/v2/internal/versions/components"
     updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
     "github.com/stretchr/testify/assert"
@@ -27,122 +29,28 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
 )
 
-func TestUpgradeK8s(t *testing.T) {
+func TestUpgradeNodeVersion(t *testing.T) {
     someErr := errors.New("some error")
     testCases := map[string]struct {
-        stable                      stubStableClient
+        stable                *stubStableClient
         conditions            []metav1.Condition
-        activeClusterVersionUpgrade bool
-        newClusterVersion           string
-        currentClusterVersion       string
-        components                  components.Components
-        getErr                      error
-        assertCorrectError          func(t *testing.T, err error) bool
-        wantErr                     bool
+        currentImageVersion   string
+        currentClusterVersion string
+        conf                  *config.Config
+        getErr                error
+        wantErr               bool
+        wantUpdate            bool
+        assertCorrectError    func(t *testing.T, err error) bool
     }{
         "success": {
-            currentClusterVersion: "v1.2.2",
-            newClusterVersion:     "v1.2.3",
-        },
-        "not an upgrade": {
-            currentClusterVersion: "v1.2.3",
-            newClusterVersion:     "v1.2.3",
-            wantErr:               true,
-            assertCorrectError: func(t *testing.T, err error) bool {
-                target := &compatibility.InvalidUpgradeError{}
-                return assert.ErrorAs(t, err, &target)
-            },
-        },
-        "downgrade": {
-            currentClusterVersion: "v1.2.3",
-            newClusterVersion:     "v1.2.2",
-            wantErr:               true,
-            assertCorrectError: func(t *testing.T, err error) bool {
-                target := &compatibility.InvalidUpgradeError{}
-                return assert.ErrorAs(t, err, &target)
-            },
-        },
-        "no constellation-version object": {
-            getErr:  someErr,
-            wantErr: true,
-            assertCorrectError: func(t *testing.T, err error) bool {
-                return assert.ErrorIs(t, err, someErr)
-            },
-        },
-        "upgrade in progress": {
-            currentClusterVersion: "v1.2.2",
-            newClusterVersion:     "v1.2.3",
-            conditions: []metav1.Condition{{
-                Type:   updatev1alpha1.ConditionOutdated,
-                Status: metav1.ConditionTrue,
-            }},
-            wantErr: true,
-            assertCorrectError: func(t *testing.T, err error) bool {
-                return assert.ErrorIs(t, err, ErrInProgress)
-            },
-        },
-        "configmap create fails": {
-            currentClusterVersion: "v1.2.2",
-            newClusterVersion:     "v1.2.3",
-            stable: stubStableClient{
-                createErr: someErr,
-            },
-            wantErr: true,
-            assertCorrectError: func(t *testing.T, err error) bool {
-                return assert.ErrorIs(t, err, someErr)
-            },
-        },
-    }
-
-    for name, tc := range testCases {
-        t.Run(name, func(t *testing.T) {
-            assert := assert.New(t)
-            require := require.New(t)
-            nodeVersion := updatev1alpha1.NodeVersion{
-                Spec: updatev1alpha1.NodeVersionSpec{
-                    KubernetesClusterVersion: tc.currentClusterVersion,
-                },
-                Status: updatev1alpha1.NodeVersionStatus{
-                    Conditions:                  tc.conditions,
-                    ActiveClusterVersionUpgrade: tc.activeClusterVersionUpgrade,
-                },
-            }
-
-            unstrNodeVersion, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
-            require.NoError(err)
-
-            upgrader := Upgrader{
-                stableInterface:  &tc.stable,
-                dynamicInterface: &stubDynamicClient{object: &unstructured.Unstructured{Object: unstrNodeVersion}, getErr: tc.getErr},
-                log:              logger.NewTest(t),
-                outWriter:        io.Discard,
-            }
-
-            err = upgrader.UpgradeK8s(context.Background(), tc.newClusterVersion, tc.components)
-            if tc.wantErr {
-                tc.assertCorrectError(t, err)
-                return
-            }
-            assert.NoError(err)
-        })
-    }
-}
-
-func TestUpgradeImage(t *testing.T) {
-    someErr := errors.New("some error")
-    testCases := map[string]struct {
-        stable              *stubStableClient
-        conditions          []metav1.Condition
-        currentImageVersion string
-        newImageVersion     string
-        getErr              error
-        wantErr             bool
-        wantUpdate          bool
-        assertCorrectError  func(t *testing.T, err error) bool
-    }{
-        "success": {
-            currentImageVersion: "v1.2.2",
-            newImageVersion:     "v1.2.3",
+            conf: func() *config.Config {
+                conf := config.Default()
+                conf.Image = "v1.2.3"
+                conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
+                return conf
+            }(),
+            currentImageVersion:   "v1.2.2",
+            currentClusterVersion: versions.SupportedK8sVersions()[0],
             stable: &stubStableClient{
                 configMap: &corev1.ConfigMap{
                     Data: map[string]string{
@@ -152,37 +60,93 @@ func TestUpgradeImage(t *testing.T) {
                 },
             },
             wantUpdate: true,
         },
-        "not an upgrade": {
-            currentImageVersion: "v1.2.2",
-            newImageVersion:     "v1.2.2",
-            wantErr:             true,
+        "only k8s upgrade": {
+            conf: func() *config.Config {
+                conf := config.Default()
+                conf.Image = "v1.2.2"
+                conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
+                return conf
+            }(),
+            currentImageVersion:   "v1.2.2",
+            currentClusterVersion: versions.SupportedK8sVersions()[0],
+            stable: &stubStableClient{
+                configMap: &corev1.ConfigMap{
+                    Data: map[string]string{
+                        constants.MeasurementsFilename: `{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`,
+                    },
+                },
+            },
+            wantUpdate: true,
+            wantErr:    true,
             assertCorrectError: func(t *testing.T, err error) bool {
-                target := &compatibility.InvalidUpgradeError{}
-                return assert.ErrorAs(t, err, &target)
+                upgradeErr := &compatibility.InvalidUpgradeError{}
+                return assert.ErrorAs(t, err, &upgradeErr)
             },
         },
-        "downgrade": {
-            currentImageVersion: "v1.2.2",
-            newImageVersion:     "v1.2.1",
-            wantErr:             true,
+        "only image upgrade": {
+            conf: func() *config.Config {
+                conf := config.Default()
+                conf.Image = "v1.2.3"
+                conf.KubernetesVersion = versions.SupportedK8sVersions()[0]
+                return conf
+            }(),
+            currentImageVersion:   "v1.2.2",
+            currentClusterVersion: versions.SupportedK8sVersions()[0],
+            stable: &stubStableClient{
+                configMap: &corev1.ConfigMap{
+                    Data: map[string]string{
+                        constants.MeasurementsFilename: `{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`,
+                    },
+                },
+            },
+            wantUpdate: true,
+            wantErr:    true,
             assertCorrectError: func(t *testing.T, err error) bool {
-                target := &compatibility.InvalidUpgradeError{}
-                return assert.ErrorAs(t, err, &target)
+                upgradeErr := &compatibility.InvalidUpgradeError{}
+                return assert.ErrorAs(t, err, &upgradeErr)
+            },
+        },
+        "not an upgrade": {
+            conf: func() *config.Config {
+                conf := config.Default()
+                conf.Image = "v1.2.2"
+                conf.KubernetesVersion = versions.SupportedK8sVersions()[0]
+                return conf
+            }(),
+            currentImageVersion:   "v1.2.2",
+            currentClusterVersion: versions.SupportedK8sVersions()[0],
+            stable:                &stubStableClient{},
+            wantErr:               true,
+            assertCorrectError: func(t *testing.T, err error) bool {
+                upgradeErr := &compatibility.InvalidUpgradeError{}
+                return assert.ErrorAs(t, err, &upgradeErr)
             },
         },
         "upgrade in progress": {
-            currentImageVersion: "v1.2.2",
-            newImageVersion:     "v1.2.3",
+            conf: func() *config.Config {
+                conf := config.Default()
+                conf.Image = "v1.2.3"
+                conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
+                return conf
+            }(),
             conditions: []metav1.Condition{{
                 Type:   updatev1alpha1.ConditionOutdated,
                 Status: metav1.ConditionTrue,
             }},
-            wantErr: true,
+            currentImageVersion:   "v1.2.2",
+            currentClusterVersion: versions.SupportedK8sVersions()[0],
+            stable:                &stubStableClient{},
+            wantErr:               true,
             assertCorrectError: func(t *testing.T, err error) bool {
                 return assert.ErrorIs(t, err, ErrInProgress)
             },
         },
         "get error": {
+            conf: func() *config.Config {
+                conf := config.Default()
+                conf.Image = "v1.2.3"
+                return conf
+            }(),
             getErr:  someErr,
             wantErr: true,
             assertCorrectError: func(t *testing.T, err error) bool {
@@ -198,7 +162,8 @@ func TestUpgradeImage(t *testing.T) {
 
             nodeVersion := updatev1alpha1.NodeVersion{
                 Spec: updatev1alpha1.NodeVersionSpec{
                     ImageVersion: tc.currentImageVersion,
+                    KubernetesClusterVersion: tc.currentClusterVersion,
                 },
                 Status: updatev1alpha1.NodeVersionStatus{
                     Conditions: tc.conditions,
@@ -212,11 +177,12 @@
             upgrader := Upgrader{
                 stableInterface:  tc.stable,
                 dynamicInterface: dynamicClient,
+                imageFetcher:     &stubImageFetcher{},
                 log:              logger.NewTest(t),
                 outWriter:        io.Discard,
             }
 
-            err = upgrader.UpgradeImage(context.Background(), "", tc.newImageVersion, nil)
+            err = upgrader.UpgradeNodeVersion(context.Background(), tc.conf)
 
             // Check upgrades first because if we checked err first, UpgradeImage may error due to other reasons and still trigger an upgrade.
             if tc.wantUpdate {
@@ -342,25 +308,26 @@ func TestUpdateMeasurements(t *testing.T) {
 func TestUpdateImage(t *testing.T) {
     someErr := errors.New("error")
     testCases := map[string]struct {
-        nodeVersion       updatev1alpha1.NodeVersion
         newImageReference string
         newImageVersion   string
+        oldImageReference string
         oldImageVersion   string
         updateErr         error
         wantUpdate        bool
         wantErr           bool
     }{
         "success": {
-            nodeVersion: updatev1alpha1.NodeVersion{
-                Spec: updatev1alpha1.NodeVersionSpec{
-                    ImageReference: "old-image-ref",
-                    ImageVersion:   "old-image-ver",
-                },
-            },
+            oldImageReference: "old-image-ref",
+            oldImageVersion:   "v0.0.0",
             newImageReference: "new-image-ref",
-            newImageVersion:   "new-image-ver",
+            newImageVersion:   "v0.1.0",
             wantUpdate:        true,
         },
+        "same version fails": {
+            oldImageVersion: "v0.0.0",
+            newImageVersion: "v0.0.0",
+            wantErr:         true,
+        },
         "update error": {
             updateErr: someErr,
             wantErr:   true,
@@ -371,14 +338,18 @@
         t.Run(name, func(t *testing.T) {
             assert := assert.New(t)
 
-            upgradeClient := &stubDynamicClient{updateErr: tc.updateErr}
             upgrader := &Upgrader{
-                dynamicInterface: upgradeClient,
-                outWriter:        io.Discard,
-                log:              logger.NewTest(t),
+                log: logger.NewTest(t),
             }
 
-            err := upgrader.updateImage(context.Background(), tc.nodeVersion, tc.newImageReference, tc.newImageVersion)
+            nodeVersion := updatev1alpha1.NodeVersion{
+                Spec: updatev1alpha1.NodeVersionSpec{
+                    ImageReference: tc.oldImageReference,
+                    ImageVersion:   tc.oldImageVersion,
+                },
+            }
+
+            err := upgrader.updateImage(&nodeVersion, tc.newImageReference, tc.newImageVersion)
 
             if tc.wantErr {
                 assert.Error(err)
@@ -387,10 +358,67 @@
 
             assert.NoError(err)
             if tc.wantUpdate {
-                assert.Equal(tc.newImageReference, upgradeClient.updatedObject.Object["spec"].(map[string]any)["image"])
-                assert.Equal(tc.newImageVersion, upgradeClient.updatedObject.Object["spec"].(map[string]any)["imageVersion"])
+                assert.Equal(tc.newImageReference, nodeVersion.Spec.ImageReference)
+                assert.Equal(tc.newImageVersion, nodeVersion.Spec.ImageVersion)
             } else {
-                assert.Nil(upgradeClient.updatedObject)
+                assert.Equal(tc.oldImageReference, nodeVersion.Spec.ImageReference)
+                assert.Equal(tc.oldImageVersion, nodeVersion.Spec.ImageVersion)
+            }
+        })
+    }
+}
+
+func TestUpdateK8s(t *testing.T) {
+    someErr := errors.New("error")
+    testCases := map[string]struct {
+        newClusterVersion string
+        oldClusterVersion string
+        updateErr         error
+        wantUpdate        bool
+        wantErr           bool
+    }{
+        "success": {
+            oldClusterVersion: "v0.0.0",
+            newClusterVersion: "v0.1.0",
+            wantUpdate:        true,
+        },
+        "same version fails": {
+            oldClusterVersion: "v0.0.0",
+            newClusterVersion: "v0.0.0",
+            wantErr:           true,
+        },
+        "update error": {
+            updateErr: someErr,
+            wantErr:   true,
+        },
+    }
+
+    for name, tc := range testCases {
+        t.Run(name, func(t *testing.T) {
+            assert := assert.New(t)
+
+            upgrader := &Upgrader{
+                log: logger.NewTest(t),
+            }
+
+            nodeVersion := updatev1alpha1.NodeVersion{
+                Spec: updatev1alpha1.NodeVersionSpec{
+                    KubernetesClusterVersion: tc.oldClusterVersion,
+                },
+            }
+
+            _, err := upgrader.updateK8s(&nodeVersion, tc.newClusterVersion, components.Components{})
+
+            if tc.wantErr {
+                assert.Error(err)
+                return
+            }
+
+            assert.NoError(err)
+            if tc.wantUpdate {
+                assert.Equal(tc.newClusterVersion, nodeVersion.Spec.KubernetesClusterVersion)
+            } else {
+                assert.Equal(tc.oldClusterVersion, nodeVersion.Spec.KubernetesClusterVersion)
             }
         })
     }
 }
@@ -428,7 +456,7 @@ func (s *stubStableClient) getCurrentConfigMap(ctx context.Context, name string)
 
 func (s *stubStableClient) updateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
     s.updatedConfigMap = configMap
-    return nil, s.updateErr
+    return s.updatedConfigMap, s.updateErr
 }
 
 func (s *stubStableClient) createConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
@@ -14,14 +14,9 @@ import (
 
     "github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
     "github.com/edgelesssys/constellation/v2/cli/internal/helm"
-    "github.com/edgelesssys/constellation/v2/cli/internal/image"
-    "github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
     "github.com/edgelesssys/constellation/v2/internal/compatibility"
    "github.com/edgelesssys/constellation/v2/internal/config"
     "github.com/edgelesssys/constellation/v2/internal/file"
-    "github.com/edgelesssys/constellation/v2/internal/versions"
-    "github.com/edgelesssys/constellation/v2/internal/versions/components"
-    "github.com/edgelesssys/constellation/v2/internal/versionsapi"
     "github.com/spf13/afero"
     "github.com/spf13/cobra"
 )
@@ -54,14 +49,13 @@ func runUpgradeApply(cmd *cobra.Command, args []string) error {
     defer log.Sync()
 
     fileHandler := file.NewHandler(afero.NewOsFs())
-    imageFetcher := image.New()
     upgrader, err := cloudcmd.NewUpgrader(cmd.OutOrStdout(), log)
     if err != nil {
         return err
     }
 
     applyCmd := upgradeApplyCmd{upgrader: upgrader, log: log}
-    return applyCmd.upgradeApply(cmd, imageFetcher, fileHandler)
+    return applyCmd.upgradeApply(cmd, fileHandler)
 }
 
 type upgradeApplyCmd struct {
@@ -69,7 +63,7 @@ type upgradeApplyCmd struct {
     log debugLog
 }
 
-func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, imageFetcher imageFetcher, fileHandler file.Handler) error {
+func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, fileHandler file.Handler) error {
     flags, err := parseUpgradeApplyFlags(cmd)
     if err != nil {
         return fmt.Errorf("parsing flags: %w", err)
@@ -83,42 +77,23 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, imageFet
     return err
     }
 
-    invalidUpgradeErr := &compatibility.InvalidUpgradeError{}
     err = u.handleServiceUpgrade(cmd, conf, flags)
+    upgradeErr := &compatibility.InvalidUpgradeError{}
     switch {
-    case errors.As(err, &invalidUpgradeErr):
-        cmd.PrintErrf("Skipping microservice upgrades: %s\n", err)
+    case errors.As(err, &upgradeErr):
+        cmd.PrintErrln(err)
     case err != nil:
-        return fmt.Errorf("service upgrade: %w", err)
+        return fmt.Errorf("upgrading services: %w", err)
     }
 
-    err = u.handleK8sUpgrade(cmd.Context(), conf)
-    skipCtr := 0
+    err = u.upgrader.UpgradeNodeVersion(cmd.Context(), conf)
     switch {
     case errors.Is(err, cloudcmd.ErrInProgress):
-        skipCtr = skipCtr + 1
-        cmd.PrintErrln("Skipping Kubernetes components upgrades. Another Kubernetes components upgrade is in progress")
-    case errors.As(err, &invalidUpgradeErr):
-        skipCtr = skipCtr + 1
-        cmd.PrintErrf("Skipping Kubernetes components upgrades: %s\n", err)
+        cmd.PrintErrln("Skipping image & Kubernetes upgrades. Another upgrade is in progress")
+    case errors.As(err, &upgradeErr):
+        cmd.PrintErrln(err)
     case err != nil:
-        return fmt.Errorf("upgrading Kubernetes components: %w", err)
-    }
-
-    err = u.handleImageUpgrade(cmd.Context(), conf, imageFetcher)
-    switch {
-    case errors.Is(err, cloudcmd.ErrInProgress):
-        skipCtr = skipCtr + 1
-        cmd.PrintErrln("Skipping image upgrades. Another image upgrade is in progress")
-    case errors.As(err, &invalidUpgradeErr):
-        skipCtr = skipCtr + 1
-        cmd.PrintErrf("Skipping image upgrades: %s\n", err)
-    case err != nil:
-        return fmt.Errorf("upgrading image: %w", err)
-    }
-
-    if skipCtr < 2 {
-        fmt.Printf("Nodes will restart automatically\n")
+        return fmt.Errorf("upgrading NodeVersion: %w", err)
     }
 
     return nil
@@ -140,45 +115,8 @@ func (u *upgradeApplyCmd) handleServiceUpgrade(cmd *cobra.Command, conf *config.
     }
         err = u.upgrader.UpgradeHelmServices(cmd.Context(), conf, flags.upgradeTimeout, helm.AllowDestructive)
     }
-    if err != nil {
-        return fmt.Errorf("upgrading helm: %w", err)
-    }
-
-    return nil
-}
-
-func (u *upgradeApplyCmd) handleImageUpgrade(ctx context.Context, conf *config.Config, imageFetcher imageFetcher) error {
-    imageReference, err := imageFetcher.FetchReference(ctx, conf)
-    if err != nil {
-        return fmt.Errorf("fetching image reference: %w", err)
-    }
-
-    imageVersion, err := versionsapi.NewVersionFromShortPath(conf.Image, versionsapi.VersionKindImage)
-    if err != nil {
-        return fmt.Errorf("parsing version from image short path: %w", err)
-    }
-
-    err = u.upgrader.UpgradeImage(ctx, imageReference, imageVersion.Version, conf.GetMeasurements())
-    if err != nil {
-        return fmt.Errorf("upgrading image: %w", err)
-    }
-
-    return nil
-}
-
-func (u *upgradeApplyCmd) handleK8sUpgrade(ctx context.Context, conf *config.Config) error {
-    currentVersion, err := versions.NewValidK8sVersion(conf.KubernetesVersion)
-    if err != nil {
-        return fmt.Errorf("getting Kubernetes version: %w", err)
-    }
-    versionConfig := versions.VersionConfigs[currentVersion]
-
-    err = u.upgrader.UpgradeK8s(ctx, versionConfig.ClusterVersion, versionConfig.KubernetesComponents)
-    if err != nil {
-        return fmt.Errorf("upgrading Kubernetes: %w", err)
-    }
-
-    return nil
+    return err
 }
 
 func parseUpgradeApplyFlags(cmd *cobra.Command) (upgradeApplyFlags, error) {
@@ -213,11 +151,6 @@ type upgradeApplyFlags struct {
 }
 
 type cloudUpgrader interface {
-    UpgradeImage(ctx context.Context, imageReference, imageVersion string, measurements measurements.M) error
+    UpgradeNodeVersion(ctx context.Context, conf *config.Config) error
     UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error
-    UpgradeK8s(ctx context.Context, clusterVersion string, components components.Components) error
-}
-
-type imageFetcher interface {
-    FetchReference(ctx context.Context, config *config.Config) (string, error)
 }
@@ -12,37 +12,35 @@ import (
     "testing"
     "time"
 
-    "github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
+    "github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
     "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
     "github.com/edgelesssys/constellation/v2/internal/config"
     "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/edgelesssys/constellation/v2/internal/file"
     "github.com/edgelesssys/constellation/v2/internal/logger"
-    "github.com/edgelesssys/constellation/v2/internal/versions/components"
     "github.com/spf13/afero"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )
 
 func TestUpgradeApply(t *testing.T) {
+    someErr := errors.New("some error")
     testCases := map[string]struct {
         upgrader stubUpgrader
-        imageFetcher stubImageFetcher
-        wantErr      bool
+        wantErr  bool
     }{
         "success": {
-            imageFetcher: stubImageFetcher{
-                reference: "someReference",
-            },
+            upgrader: stubUpgrader{},
         },
-        "fetch error": {
-            imageFetcher: stubImageFetcher{
-                fetchReferenceErr: errors.New("error"),
-            },
-            wantErr: true,
+        "nodeVersion some error": {
+            upgrader: stubUpgrader{nodeVersionErr: someErr},
+            wantErr:  true,
         },
-        "upgrade error": {
-            upgrader: stubUpgrader{imageErr: errors.New("error")},
+        "nodeVersion in progress error": {
+            upgrader: stubUpgrader{nodeVersionErr: cloudcmd.ErrInProgress},
+        },
+        "helm other error": {
+            upgrader: stubUpgrader{helmErr: someErr},
             wantErr: true,
         },
     }
@@ -60,7 +58,7 @@ func TestUpgradeApply(t *testing.T) {
             require.NoError(handler.WriteYAML(constants.ConfigFilename, cfg))
 
             upgrader := upgradeApplyCmd{upgrader: tc.upgrader, log: logger.NewTest(t)}
-            err := upgrader.upgradeApply(cmd, &tc.imageFetcher, handler)
+            err := upgrader.upgradeApply(cmd, handler)
             if tc.wantErr {
                 assert.Error(err)
             } else {
@@ -71,28 +69,14 @@ func TestUpgradeApply(t *testing.T) {
 }
 
 type stubUpgrader struct {
-    imageErr error
-    helmErr  error
-    k8sErr   error
+    nodeVersionErr error
+    helmErr        error
 }
 
-func (u stubUpgrader) UpgradeImage(context.Context, string, string, measurements.M) error {
-    return u.imageErr
+func (u stubUpgrader) UpgradeNodeVersion(ctx context.Context, conf *config.Config) error {
+    return u.nodeVersionErr
 }
 
 func (u stubUpgrader) UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error {
     return u.helmErr
 }
-
-func (u stubUpgrader) UpgradeK8s(ctx context.Context, clusterVersion string, components components.Components) error {
-    return u.k8sErr
-}
-
-type stubImageFetcher struct {
-    reference         string
-    fetchReferenceErr error
-}
-
-func (f *stubImageFetcher) FetchReference(_ context.Context, _ *config.Config) (string, error) {
-    return f.reference, f.fetchReferenceErr
-}
@@ -8,6 +8,7 @@ package helm
 
 import (
     "context"
+    "errors"
     "fmt"
     "strings"
     "time"
@@ -18,7 +19,6 @@ import (
     "github.com/edgelesssys/constellation/v2/internal/deploy/helm"
     "github.com/edgelesssys/constellation/v2/internal/file"
     "github.com/edgelesssys/constellation/v2/internal/versions"
-    "github.com/pkg/errors"
     "github.com/spf13/afero"
     "helm.sh/helm/v3/pkg/action"
     "helm.sh/helm/v3/pkg/chart"
@@ -73,23 +73,41 @@ func NewClient(client crdClient, kubeConfigPath, helmNamespace string, log debug
 // If the CLI receives an interrupt signal it will cancel the context.
 // Canceling the context will prompt helm to abort and roll back the ongoing upgrade.
 func (c *Client) Upgrade(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error {
-    if err := c.upgradeRelease(ctx, timeout, config, ciliumPath, ciliumReleaseName, false, allowDestructive); err != nil {
-        return fmt.Errorf("upgrading cilium: %w", err)
+    upgradeErrs := []error{}
+    invalidUpgrade := &compatibility.InvalidUpgradeError{}
+    err := c.upgradeRelease(ctx, timeout, config, ciliumPath, ciliumReleaseName, false, allowDestructive)
+    switch {
+    case errors.As(err, &invalidUpgrade):
+        upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping Cilium upgrade: %w", err))
+    case err != nil:
+        return fmt.Errorf("upgrading cilium: %s", err)
     }
 
-    if err := c.upgradeRelease(ctx, timeout, config, certManagerPath, certManagerReleaseName, false, allowDestructive); err != nil {
+    err = c.upgradeRelease(ctx, timeout, config, certManagerPath, certManagerReleaseName, false, allowDestructive)
+    switch {
+    case errors.As(err, &invalidUpgrade):
+        upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping cert-manager upgrade: %w", err))
+    case err != nil:
         return fmt.Errorf("upgrading cert-manager: %w", err)
     }
 
-    if err := c.upgradeRelease(ctx, timeout, config, conOperatorsPath, conOperatorsReleaseName, true, allowDestructive); err != nil {
+    err = c.upgradeRelease(ctx, timeout, config, conOperatorsPath, conOperatorsReleaseName, true, allowDestructive)
+    switch {
+    case errors.As(err, &invalidUpgrade):
+        upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping constellation operators upgrade: %w", err))
+    case err != nil:
         return fmt.Errorf("upgrading constellation operators: %w", err)
     }
 
-    if err := c.upgradeRelease(ctx, timeout, config, conServicesPath, conServicesReleaseName, false, allowDestructive); err != nil {
+    err = c.upgradeRelease(ctx, timeout, config, conServicesPath, conServicesReleaseName, false, allowDestructive)
+    switch {
+    case errors.As(err, &invalidUpgrade):
+        upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping constellation-services upgrade: %w", err))
+    case err != nil:
         return fmt.Errorf("upgrading constellation-services: %w", err)
     }
 
-    return nil
+    return errors.Join(upgradeErrs...)
 }
 
 // Versions queries the cluster for running versions and returns a map of releaseName -> version.
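A side note on the error aggregation used in Upgrade above: since Go 1.20, errors.Join returns nil when all collected errors are nil, so `return errors.Join(upgradeErrs...)` doubles as the success path, and callers can still match the individual wrapped errors. A small standalone illustration (not code from this commit):

package main

import (
    "errors"
    "fmt"
)

var errSkipped = errors.New("invalid upgrade")

func main() {
    // Joining an empty (or all-nil) slice yields nil: the success path.
    var upgradeErrs []error
    fmt.Println(errors.Join(upgradeErrs...) == nil) // true

    // Joined errors keep working with errors.Is/errors.As on each element.
    upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping Cilium upgrade: %w", errSkipped))
    fmt.Println(errors.Is(errors.Join(upgradeErrs...), errSkipped)) // true
}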
@@ -41,12 +41,12 @@ func NewInvalidUpgradeError(from string, to string, innerErr error) *InvalidUpgr
 }
 
 // Unwrap returns the inner error, which is nil in this case.
-func (e *InvalidUpgradeError) Unwrap() error {
+func (e InvalidUpgradeError) Unwrap() error {
     return e.innerErr
 }
 
 // Error returns the String representation of this error.
-func (e *InvalidUpgradeError) Error() string {
+func (e InvalidUpgradeError) Error() string {
     return fmt.Sprintf("upgrading from %s to %s is not a valid upgrade: %s", e.from, e.to, e.innerErr)
 }
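The receiver change above (from *InvalidUpgradeError to InvalidUpgradeError) keeps both the value and the pointer type satisfying the error interface, which is what lets the CLI code in this commit treat a wrapped *InvalidUpgradeError as a soft failure via errors.As. A minimal sketch with a stand-in type (not the real compatibility package):

package main

import (
    "errors"
    "fmt"
)

// InvalidUpgradeError is a stand-in for compatibility.InvalidUpgradeError,
// using value receivers as introduced in the hunk above.
type InvalidUpgradeError struct{ from, to string }

func (e InvalidUpgradeError) Error() string {
    return fmt.Sprintf("upgrading from %s to %s is not a valid upgrade", e.from, e.to)
}

func validate() error {
    // Returned as a pointer and wrapped, mirroring how the CLI surfaces it.
    return fmt.Errorf("skipping image upgrades: %w", &InvalidUpgradeError{from: "v1.2.3", to: "v1.2.3"})
}

func main() {
    err := validate()
    upgradeErr := &InvalidUpgradeError{}
    if errors.As(err, &upgradeErr) { // matches the *InvalidUpgradeError in the wrap chain
        fmt.Println("soft failure:", upgradeErr)
    }
}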