cli: restructure upgrade apply (#1319)

Applies the updated NodeVersion object with one request
instead of two. This ensures that the first request does
not accidentally put the cluster into an "upgrade in progress"
status, which would force users to run apply twice.
Otto Bittner 2023-03-03 09:38:23 +01:00 committed by GitHub
parent 57d675c819
commit f0db5d0395
7 changed files with 345 additions and 360 deletions
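For context, the consolidated flow looks roughly like the following minimal sketch; the types and version strings are illustrative, not the project's actual API. All NodeVersion fields are updated on a local copy first, and only then is a single update request sent.

package main

import "fmt"

// nodeVersion is a simplified stand-in for the NodeVersion custom resource.
type nodeVersion struct {
	ImageVersion      string
	KubernetesVersion string
}

// apply stands in for the single dynamic-client update request.
func apply(nv nodeVersion) {
	fmt.Printf("sending one update request: %+v\n", nv)
}

func main() {
	// Fetch the current object once, then mutate the local copy for every
	// upgrade kind...
	nv := nodeVersion{ImageVersion: "v2.5.0", KubernetesVersion: "v1.25.5"}
	nv.ImageVersion = "v2.6.0"
	nv.KubernetesVersion = "v1.26.1"
	// ...and send the fully updated object in one request, so the cluster
	// never observes a half-applied "upgrade in progress" state between
	// two separate requests.
	apply(nv)
}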

View File

@ -16,6 +16,11 @@ import (
tfjson "github.com/hashicorp/terraform-json"
)
// imageFetcher gets an image reference from the versionsapi.
type imageFetcher interface {
FetchReference(ctx context.Context, config *config.Config) (string, error)
}
type terraformClient interface {
PrepareWorkspace(path string, input terraform.Variables) error
CreateCluster(ctx context.Context) (terraform.CreateOutput, error)
@ -31,10 +36,6 @@ type libvirtRunner interface {
Stop(ctx context.Context) error
}
type imageFetcher interface {
FetchReference(ctx context.Context, config *config.Config) (string, error)
}
type rawDownloader interface {
Download(ctx context.Context, errWriter io.Writer, isTTY bool, source, version string) (string, error)
}
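Moving the imageFetcher interface next to the other client interfaces keeps the upgrader testable without network access: any stub satisfying it can replace the real versionsapi-backed fetcher. A minimal illustration, with the config parameter simplified to a string and all other names made up:

package main

import (
	"context"
	"fmt"
)

// imageFetcher mirrors the narrow interface above, with the config type
// simplified to a plain string for this sketch.
type imageFetcher interface {
	FetchReference(ctx context.Context, image string) (string, error)
}

// stubFetcher returns a canned reference instead of querying the versions API.
type stubFetcher struct {
	reference string
	err       error
}

func (s *stubFetcher) FetchReference(context.Context, string) (string, error) {
	return s.reference, s.err
}

func resolveImage(ctx context.Context, fetcher imageFetcher, image string) (string, error) {
	ref, err := fetcher.FetchReference(ctx, image)
	if err != nil {
		return "", fmt.Errorf("fetching image reference: %w", err)
	}
	return ref, nil
}

func main() {
	ref, err := resolveImage(context.Background(), &stubFetcher{reference: "some/image:ref"}, "v2.6.0")
	fmt.Println(ref, err)
}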

View File

@ -15,13 +15,16 @@ import (
"time"
"github.com/edgelesssys/constellation/v2/cli/internal/helm"
"github.com/edgelesssys/constellation/v2/cli/internal/image"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/compatibility"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
internalk8s "github.com/edgelesssys/constellation/v2/internal/kubernetes"
"github.com/edgelesssys/constellation/v2/internal/kubernetes/kubectl"
"github.com/edgelesssys/constellation/v2/internal/versions"
"github.com/edgelesssys/constellation/v2/internal/versions/components"
"github.com/edgelesssys/constellation/v2/internal/versionsapi"
updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
@ -42,7 +45,7 @@ type Upgrader struct {
stableInterface stableInterface
dynamicInterface dynamicInterface
helmClient helmInterface
imageFetcher imageFetcher
outWriter io.Writer
log debugLog
}
@ -74,100 +77,145 @@ func NewUpgrader(outWriter io.Writer, log debugLog) (*Upgrader, error) {
stableInterface: &stableClient{client: kubeClient},
dynamicInterface: &dynamicClient{client: unstructuredClient},
helmClient: helmClient,
imageFetcher: image.New(),
outWriter: outWriter,
log: log,
}, nil
}
// UpgradeImage upgrades the cluster to the given measurements and image.
func (u *Upgrader) UpgradeImage(ctx context.Context, newImageReference, newImageVersion string, newMeasurements measurements.M) error {
nodeVersion, err := u.getConstellationVersion(ctx)
if err != nil {
return fmt.Errorf("retrieving current image: %w", err)
}
currentImageVersion := nodeVersion.Spec.ImageVersion
if err := compatibility.IsValidUpgrade(currentImageVersion, newImageVersion); err != nil {
return err
}
if imageUpgradeInProgress(nodeVersion) {
return ErrInProgress
}
if err := u.updateMeasurements(ctx, newMeasurements); err != nil {
return fmt.Errorf("updating measurements: %w", err)
}
if err := u.updateImage(ctx, nodeVersion, newImageReference, newImageVersion); err != nil {
return fmt.Errorf("updating image: %w", err)
}
return nil
}
// UpgradeHelmServices upgrade helm services.
func (u *Upgrader) UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error {
return u.helmClient.Upgrade(ctx, config, timeout, allowDestructive)
}
// UpgradeK8s upgrade the Kubernetes cluster version and the installed components to matching versions.
func (u *Upgrader) UpgradeK8s(ctx context.Context, newClusterVersion string, components components.Components) error {
nodeVersion, err := u.getConstellationVersion(ctx)
// UpgradeNodeVersion upgrades the cluster's NodeVersion object and in turn triggers image & k8s version upgrades.
// The versions set in the config are validated against the versions running in the cluster.
func (u *Upgrader) UpgradeNodeVersion(ctx context.Context, conf *config.Config) error {
imageReference, err := u.imageFetcher.FetchReference(ctx, conf)
if err != nil {
return fmt.Errorf("getting kubernetesClusterVersion: %w", err)
return fmt.Errorf("fetching image reference: %w", err)
}
if err := compatibility.IsValidUpgrade(nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion); err != nil {
imageVersion, err := versionsapi.NewVersionFromShortPath(conf.Image, versionsapi.VersionKindImage)
if err != nil {
return fmt.Errorf("parsing version from image short path: %w", err)
}
currentK8sVersion, err := versions.NewValidK8sVersion(conf.KubernetesVersion)
if err != nil {
return fmt.Errorf("getting Kubernetes version: %w", err)
}
versionConfig := versions.VersionConfigs[currentK8sVersion]
nodeVersion, err := u.checkClusterStatus(ctx)
if err != nil {
return err
}
if k8sUpgradeInProgress(nodeVersion) {
return ErrInProgress
upgradeErrs := []error{}
upgradeErr := &compatibility.InvalidUpgradeError{}
err = u.updateImage(&nodeVersion, imageReference, imageVersion.Version)
if errors.As(err, &upgradeErr) {
upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping image upgrades: %w", err))
}
u.log.Debugf("Upgrading cluster's Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)
configMap, err := internalk8s.ConstructK8sComponentsCM(components, newClusterVersion)
components, err := u.updateK8s(&nodeVersion, versionConfig.ClusterVersion, versionConfig.KubernetesComponents)
if errors.As(err, &upgradeErr) {
upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping Kubernetes upgrades: %w", err))
}
if len(upgradeErrs) == 2 {
return errors.Join(upgradeErrs...)
}
if err := u.updateMeasurements(ctx, conf.GetMeasurements()); err != nil {
return fmt.Errorf("updating measurements: %w", err)
}
updatedNodeVersion, err := u.applyUpgrade(ctx, &components, nodeVersion)
if err != nil {
return fmt.Errorf("constructing k8s-components ConfigMap: %w", err)
return fmt.Errorf("applying upgrade: %w", err)
}
if updatedNodeVersion.Spec.ImageReference != imageReference ||
updatedNodeVersion.Spec.ImageVersion != imageVersion.Version ||
updatedNodeVersion.Spec.KubernetesComponentsReference != components.ObjectMeta.Name ||
updatedNodeVersion.Spec.KubernetesClusterVersion != versionConfig.ClusterVersion {
return errors.New("unexpected value in updated nodeVersion object")
}
_, err = u.stableInterface.createConfigMap(ctx, &configMap)
return errors.Join(upgradeErrs...)
}
func (u *Upgrader) applyUpgrade(ctx context.Context, components *corev1.ConfigMap, nodeVersion updatev1alpha1.NodeVersion) (updatev1alpha1.NodeVersion, error) {
_, err := u.stableInterface.createConfigMap(ctx, components)
// If the map already exists we can use that map and assume it has the same content as 'configMap'.
if err != nil && !k8serrors.IsAlreadyExists(err) {
return fmt.Errorf("creating k8s-components ConfigMap: %w. %T", err, err)
return updatev1alpha1.NodeVersion{}, fmt.Errorf("creating k8s-components ConfigMap: %w. %T", err, err)
}
nodeVersion.Spec.KubernetesComponentsReference = configMap.ObjectMeta.Name
nodeVersion.Spec.KubernetesClusterVersion = newClusterVersion
raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
if err != nil {
return fmt.Errorf("converting nodeVersion to unstructured: %w", err)
return updatev1alpha1.NodeVersion{}, fmt.Errorf("converting nodeVersion to unstructured: %w", err)
}
u.log.Debugf("Triggering Kubernetes version upgrade now")
// Send the updated NodeVersion resource
updated, err := u.dynamicInterface.update(ctx, &unstructured.Unstructured{Object: raw})
if err != nil {
return fmt.Errorf("updating NodeVersion: %w", err)
return updatev1alpha1.NodeVersion{}, fmt.Errorf("updating NodeVersion: %w", err)
}
// Verify the update worked as expected
updatedSpec, ok := updated.Object["spec"]
if !ok {
return errors.New("invalid updated NodeVersion spec")
}
updatedMap, ok := updatedSpec.(map[string]any)
if !ok {
return errors.New("invalid updated NodeVersion spec")
}
if updatedMap["kubernetesComponentsReference"] != configMap.ObjectMeta.Name || updatedMap["kubernetesClusterVersion"] != newClusterVersion {
return errors.New("failed to update NodeVersion resource")
var updatedNodeVersion updatev1alpha1.NodeVersion
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(updated.UnstructuredContent(), &updatedNodeVersion); err != nil {
return updatev1alpha1.NodeVersion{}, fmt.Errorf("converting unstructured to NodeVersion: %w", err)
}
fmt.Fprintf(u.outWriter, "Successfully updated the cluster's Kubernetes version to %s\n", newClusterVersion)
return updatedNodeVersion, nil
}
func (u *Upgrader) checkClusterStatus(ctx context.Context) (updatev1alpha1.NodeVersion, error) {
nodeVersion, err := u.getConstellationVersion(ctx)
if err != nil {
return updatev1alpha1.NodeVersion{}, fmt.Errorf("retrieving current image: %w", err)
}
if upgradeInProgress(nodeVersion) {
return updatev1alpha1.NodeVersion{}, ErrInProgress
}
return nodeVersion, nil
}
// updateImage upgrades the cluster to the given measurements and image.
func (u *Upgrader) updateImage(nodeVersion *updatev1alpha1.NodeVersion, newImageReference, newImageVersion string) error {
currentImageVersion := nodeVersion.Spec.ImageVersion
if err := compatibility.IsValidUpgrade(currentImageVersion, newImageVersion); err != nil {
return err
}
u.log.Debugf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, newImageVersion)
nodeVersion.Spec.ImageReference = newImageReference
nodeVersion.Spec.ImageVersion = newImageVersion
return nil
}
func (u *Upgrader) updateK8s(nodeVersion *updatev1alpha1.NodeVersion, newClusterVersion string, components components.Components) (corev1.ConfigMap, error) {
configMap, err := internalk8s.ConstructK8sComponentsCM(components, newClusterVersion)
if err != nil {
return corev1.ConfigMap{}, fmt.Errorf("constructing k8s-components ConfigMap: %w", err)
}
if err := compatibility.IsValidUpgrade(nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion); err != nil {
return corev1.ConfigMap{}, err
}
u.log.Debugf("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)
nodeVersion.Spec.KubernetesComponentsReference = configMap.ObjectMeta.Name
nodeVersion.Spec.KubernetesClusterVersion = newClusterVersion
return configMap, nil
}
// KubernetesVersion returns the version of Kubernetes the Constellation is currently running on.
func (u *Upgrader) KubernetesVersion() (string, error) {
return u.stableInterface.kubernetesVersion()
@ -247,27 +295,9 @@ func (u *Upgrader) updateMeasurements(ctx context.Context, newMeasurements measu
return nil
}
func (u *Upgrader) updateImage(ctx context.Context, nodeVersion updatev1alpha1.NodeVersion, newImageRef, newImageVersion string) error {
u.log.Debugf("Upgrading cluster's image version from %s to %s", nodeVersion.Spec.ImageVersion, newImageVersion)
nodeVersion.Spec.ImageReference = newImageRef
nodeVersion.Spec.ImageVersion = newImageVersion
raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
if err != nil {
return fmt.Errorf("converting nodeVersion to unstructured: %w", err)
}
u.log.Debugf("Triggering image version upgrade now")
if _, err := u.dynamicInterface.update(ctx, &unstructured.Unstructured{Object: raw}); err != nil {
return fmt.Errorf("setting new image: %w", err)
}
fmt.Fprintf(u.outWriter, "Successfully updated the cluster's image version to %s\n", newImageVersion)
return nil
}
// k8sUpgradeInProgress checks if a k8s upgrade is in progress.
// upgradeInProgress checks if an upgrade is in progress.
// Returns true with errors as it's the "safer" response. If caller does not check err they at least won't update the cluster.
func k8sUpgradeInProgress(nodeVersion updatev1alpha1.NodeVersion) bool {
func upgradeInProgress(nodeVersion updatev1alpha1.NodeVersion) bool {
conditions := nodeVersion.Status.Conditions
activeUpgrade := nodeVersion.Status.ActiveClusterVersionUpgrade
@ -283,15 +313,6 @@ func k8sUpgradeInProgress(nodeVersion updatev1alpha1.NodeVersion) bool {
return false
}
func imageUpgradeInProgress(nodeVersion updatev1alpha1.NodeVersion) bool {
for _, condition := range nodeVersion.Status.Conditions {
if condition.Type == updatev1alpha1.ConditionOutdated && condition.Status == metav1.ConditionTrue {
return true
}
}
return false
}
type dynamicInterface interface {
getCurrent(ctx context.Context, name string) (*unstructured.Unstructured, error)
update(ctx context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error)
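The applyUpgrade helper above round-trips the typed NodeVersion through the unstructured form the dynamic client expects, then converts the server's response back so the caller can verify every field was accepted. A self-contained sketch of that conversion pattern, using a placeholder spec struct instead of the real CRD type:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

// spec is a placeholder for the NodeVersion spec fields touched by an upgrade.
type spec struct {
	ImageVersion                  string `json:"imageVersion,omitempty"`
	KubernetesComponentsReference string `json:"kubernetesComponentsReference,omitempty"`
}

func main() {
	in := spec{ImageVersion: "v2.6.0", KubernetesComponentsReference: "k8s-components-sha256-abc"}

	// Typed -> unstructured, as done before calling dynamicInterface.update.
	raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&in)
	if err != nil {
		panic(err)
	}
	obj := &unstructured.Unstructured{Object: raw}

	// Unstructured -> typed, as done with the object returned by the update.
	var out spec
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}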

View File

@ -15,8 +15,10 @@ import (
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/compatibility"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/versions"
"github.com/edgelesssys/constellation/v2/internal/versions/components"
updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
"github.com/stretchr/testify/assert"
@ -27,122 +29,28 @@ import (
"k8s.io/apimachinery/pkg/runtime"
)
func TestUpgradeK8s(t *testing.T) {
someErr := errors.New("some error")
testCases := map[string]struct {
stable stubStableClient
conditions []metav1.Condition
activeClusterVersionUpgrade bool
newClusterVersion string
currentClusterVersion string
components components.Components
getErr error
assertCorrectError func(t *testing.T, err error) bool
wantErr bool
}{
"success": {
currentClusterVersion: "v1.2.2",
newClusterVersion: "v1.2.3",
},
"not an upgrade": {
currentClusterVersion: "v1.2.3",
newClusterVersion: "v1.2.3",
wantErr: true,
assertCorrectError: func(t *testing.T, err error) bool {
target := &compatibility.InvalidUpgradeError{}
return assert.ErrorAs(t, err, &target)
},
},
"downgrade": {
currentClusterVersion: "v1.2.3",
newClusterVersion: "v1.2.2",
wantErr: true,
assertCorrectError: func(t *testing.T, err error) bool {
target := &compatibility.InvalidUpgradeError{}
return assert.ErrorAs(t, err, &target)
},
},
"no constellation-version object": {
getErr: someErr,
wantErr: true,
assertCorrectError: func(t *testing.T, err error) bool {
return assert.ErrorIs(t, err, someErr)
},
},
"upgrade in progress": {
currentClusterVersion: "v1.2.2",
newClusterVersion: "v1.2.3",
conditions: []metav1.Condition{{
Type: updatev1alpha1.ConditionOutdated,
Status: metav1.ConditionTrue,
}},
wantErr: true,
assertCorrectError: func(t *testing.T, err error) bool {
return assert.ErrorIs(t, err, ErrInProgress)
},
},
"configmap create fails": {
currentClusterVersion: "v1.2.2",
newClusterVersion: "v1.2.3",
stable: stubStableClient{
createErr: someErr,
},
wantErr: true,
assertCorrectError: func(t *testing.T, err error) bool {
return assert.ErrorIs(t, err, someErr)
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
nodeVersion := updatev1alpha1.NodeVersion{
Spec: updatev1alpha1.NodeVersionSpec{
KubernetesClusterVersion: tc.currentClusterVersion,
},
Status: updatev1alpha1.NodeVersionStatus{
Conditions: tc.conditions,
ActiveClusterVersionUpgrade: tc.activeClusterVersionUpgrade,
},
}
unstrNodeVersion, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
require.NoError(err)
upgrader := Upgrader{
stableInterface: &tc.stable,
dynamicInterface: &stubDynamicClient{object: &unstructured.Unstructured{Object: unstrNodeVersion}, getErr: tc.getErr},
log: logger.NewTest(t),
outWriter: io.Discard,
}
err = upgrader.UpgradeK8s(context.Background(), tc.newClusterVersion, tc.components)
if tc.wantErr {
tc.assertCorrectError(t, err)
return
}
assert.NoError(err)
})
}
}
func TestUpgradeImage(t *testing.T) {
func TestUpgradeNodeVersion(t *testing.T) {
someErr := errors.New("some error")
testCases := map[string]struct {
stable *stubStableClient
conditions []metav1.Condition
currentImageVersion string
newImageVersion string
currentClusterVersion string
conf *config.Config
getErr error
wantErr bool
wantUpdate bool
assertCorrectError func(t *testing.T, err error) bool
}{
"success": {
conf: func() *config.Config {
conf := config.Default()
conf.Image = "v1.2.3"
conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
return conf
}(),
currentImageVersion: "v1.2.2",
newImageVersion: "v1.2.3",
currentClusterVersion: versions.SupportedK8sVersions()[0],
stable: &stubStableClient{
configMap: &corev1.ConfigMap{
Data: map[string]string{
@ -152,37 +60,93 @@ func TestUpgradeImage(t *testing.T) {
},
wantUpdate: true,
},
"only k8s upgrade": {
conf: func() *config.Config {
conf := config.Default()
conf.Image = "v1.2.2"
conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
return conf
}(),
currentImageVersion: "v1.2.2",
currentClusterVersion: versions.SupportedK8sVersions()[0],
stable: &stubStableClient{
configMap: &corev1.ConfigMap{
Data: map[string]string{
constants.MeasurementsFilename: `{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`,
},
},
},
wantUpdate: true,
wantErr: true,
assertCorrectError: func(t *testing.T, err error) bool {
upgradeErr := &compatibility.InvalidUpgradeError{}
return assert.ErrorAs(t, err, &upgradeErr)
},
},
"only image upgrade": {
conf: func() *config.Config {
conf := config.Default()
conf.Image = "v1.2.3"
conf.KubernetesVersion = versions.SupportedK8sVersions()[0]
return conf
}(),
currentImageVersion: "v1.2.2",
currentClusterVersion: versions.SupportedK8sVersions()[0],
stable: &stubStableClient{
configMap: &corev1.ConfigMap{
Data: map[string]string{
constants.MeasurementsFilename: `{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`,
},
},
},
wantUpdate: true,
wantErr: true,
assertCorrectError: func(t *testing.T, err error) bool {
upgradeErr := &compatibility.InvalidUpgradeError{}
return assert.ErrorAs(t, err, &upgradeErr)
},
},
"not an upgrade": {
conf: func() *config.Config {
conf := config.Default()
conf.Image = "v1.2.2"
conf.KubernetesVersion = versions.SupportedK8sVersions()[0]
return conf
}(),
currentImageVersion: "v1.2.2",
newImageVersion: "v1.2.2",
currentClusterVersion: versions.SupportedK8sVersions()[0],
stable: &stubStableClient{},
wantErr: true,
assertCorrectError: func(t *testing.T, err error) bool {
target := &compatibility.InvalidUpgradeError{}
return assert.ErrorAs(t, err, &target)
},
},
"downgrade": {
currentImageVersion: "v1.2.2",
newImageVersion: "v1.2.1",
wantErr: true,
assertCorrectError: func(t *testing.T, err error) bool {
target := &compatibility.InvalidUpgradeError{}
return assert.ErrorAs(t, err, &target)
upgradeErr := &compatibility.InvalidUpgradeError{}
return assert.ErrorAs(t, err, &upgradeErr)
},
},
"upgrade in progress": {
currentImageVersion: "v1.2.2",
newImageVersion: "v1.2.3",
conf: func() *config.Config {
conf := config.Default()
conf.Image = "v1.2.3"
conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
return conf
}(),
conditions: []metav1.Condition{{
Type: updatev1alpha1.ConditionOutdated,
Status: metav1.ConditionTrue,
}},
currentImageVersion: "v1.2.2",
currentClusterVersion: versions.SupportedK8sVersions()[0],
stable: &stubStableClient{},
wantErr: true,
assertCorrectError: func(t *testing.T, err error) bool {
return assert.ErrorIs(t, err, ErrInProgress)
},
},
"get error": {
conf: func() *config.Config {
conf := config.Default()
conf.Image = "v1.2.3"
return conf
}(),
getErr: someErr,
wantErr: true,
assertCorrectError: func(t *testing.T, err error) bool {
@ -199,6 +163,7 @@ func TestUpgradeImage(t *testing.T) {
nodeVersion := updatev1alpha1.NodeVersion{
Spec: updatev1alpha1.NodeVersionSpec{
ImageVersion: tc.currentImageVersion,
KubernetesClusterVersion: tc.currentClusterVersion,
},
Status: updatev1alpha1.NodeVersionStatus{
Conditions: tc.conditions,
@ -212,11 +177,12 @@ func TestUpgradeImage(t *testing.T) {
upgrader := Upgrader{
stableInterface: tc.stable,
dynamicInterface: dynamicClient,
imageFetcher: &stubImageFetcher{},
log: logger.NewTest(t),
outWriter: io.Discard,
}
err = upgrader.UpgradeImage(context.Background(), "", tc.newImageVersion, nil)
err = upgrader.UpgradeNodeVersion(context.Background(), tc.conf)
// Check upgrades first because if we checked err first, UpgradeImage may error due to other reasons and still trigger an upgrade.
if tc.wantUpdate {
@ -342,25 +308,26 @@ func TestUpdateMeasurements(t *testing.T) {
func TestUpdateImage(t *testing.T) {
someErr := errors.New("error")
testCases := map[string]struct {
nodeVersion updatev1alpha1.NodeVersion
newImageReference string
newImageVersion string
oldImageReference string
oldImageVersion string
updateErr error
wantUpdate bool
wantErr bool
}{
"success": {
nodeVersion: updatev1alpha1.NodeVersion{
Spec: updatev1alpha1.NodeVersionSpec{
ImageReference: "old-image-ref",
ImageVersion: "old-image-ver",
},
},
oldImageReference: "old-image-ref",
oldImageVersion: "v0.0.0",
newImageReference: "new-image-ref",
newImageVersion: "new-image-ver",
newImageVersion: "v0.1.0",
wantUpdate: true,
},
"same version fails": {
oldImageVersion: "v0.0.0",
newImageVersion: "v0.0.0",
wantErr: true,
},
"update error": {
updateErr: someErr,
wantErr: true,
@ -371,14 +338,18 @@ func TestUpdateImage(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
upgradeClient := &stubDynamicClient{updateErr: tc.updateErr}
upgrader := &Upgrader{
dynamicInterface: upgradeClient,
outWriter: io.Discard,
log: logger.NewTest(t),
}
err := upgrader.updateImage(context.Background(), tc.nodeVersion, tc.newImageReference, tc.newImageVersion)
nodeVersion := updatev1alpha1.NodeVersion{
Spec: updatev1alpha1.NodeVersionSpec{
ImageReference: tc.oldImageReference,
ImageVersion: tc.oldImageVersion,
},
}
err := upgrader.updateImage(&nodeVersion, tc.newImageReference, tc.newImageVersion)
if tc.wantErr {
assert.Error(err)
@ -387,10 +358,67 @@ func TestUpdateImage(t *testing.T) {
assert.NoError(err)
if tc.wantUpdate {
assert.Equal(tc.newImageReference, upgradeClient.updatedObject.Object["spec"].(map[string]any)["image"])
assert.Equal(tc.newImageVersion, upgradeClient.updatedObject.Object["spec"].(map[string]any)["imageVersion"])
assert.Equal(tc.newImageReference, nodeVersion.Spec.ImageReference)
assert.Equal(tc.newImageVersion, nodeVersion.Spec.ImageVersion)
} else {
assert.Nil(upgradeClient.updatedObject)
assert.Equal(tc.oldImageReference, nodeVersion.Spec.ImageReference)
assert.Equal(tc.oldImageVersion, nodeVersion.Spec.ImageVersion)
}
})
}
}
func TestUpdateK8s(t *testing.T) {
someErr := errors.New("error")
testCases := map[string]struct {
newClusterVersion string
oldClusterVersion string
updateErr error
wantUpdate bool
wantErr bool
}{
"success": {
oldClusterVersion: "v0.0.0",
newClusterVersion: "v0.1.0",
wantUpdate: true,
},
"same version fails": {
oldClusterVersion: "v0.0.0",
newClusterVersion: "v0.0.0",
wantErr: true,
},
"update error": {
updateErr: someErr,
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
upgrader := &Upgrader{
log: logger.NewTest(t),
}
nodeVersion := updatev1alpha1.NodeVersion{
Spec: updatev1alpha1.NodeVersionSpec{
KubernetesClusterVersion: tc.oldClusterVersion,
},
}
_, err := upgrader.updateK8s(&nodeVersion, tc.newClusterVersion, components.Components{})
if tc.wantErr {
assert.Error(err)
return
}
assert.NoError(err)
if tc.wantUpdate {
assert.Equal(tc.newClusterVersion, nodeVersion.Spec.KubernetesClusterVersion)
} else {
assert.Equal(tc.oldClusterVersion, nodeVersion.Spec.KubernetesClusterVersion)
}
})
}
@ -428,7 +456,7 @@ func (s *stubStableClient) getCurrentConfigMap(ctx context.Context, name string)
func (s *stubStableClient) updateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
s.updatedConfigMap = configMap
return nil, s.updateErr
return s.updatedConfigMap, s.updateErr
}
func (s *stubStableClient) createConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {

View File

@ -14,14 +14,9 @@ import (
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
"github.com/edgelesssys/constellation/v2/cli/internal/helm"
"github.com/edgelesssys/constellation/v2/cli/internal/image"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/compatibility"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/versions"
"github.com/edgelesssys/constellation/v2/internal/versions/components"
"github.com/edgelesssys/constellation/v2/internal/versionsapi"
"github.com/spf13/afero"
"github.com/spf13/cobra"
)
@ -54,14 +49,13 @@ func runUpgradeApply(cmd *cobra.Command, args []string) error {
defer log.Sync()
fileHandler := file.NewHandler(afero.NewOsFs())
imageFetcher := image.New()
upgrader, err := cloudcmd.NewUpgrader(cmd.OutOrStdout(), log)
if err != nil {
return err
}
applyCmd := upgradeApplyCmd{upgrader: upgrader, log: log}
return applyCmd.upgradeApply(cmd, imageFetcher, fileHandler)
return applyCmd.upgradeApply(cmd, fileHandler)
}
type upgradeApplyCmd struct {
@ -69,7 +63,7 @@ type upgradeApplyCmd struct {
log debugLog
}
func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, imageFetcher imageFetcher, fileHandler file.Handler) error {
func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, fileHandler file.Handler) error {
flags, err := parseUpgradeApplyFlags(cmd)
if err != nil {
return fmt.Errorf("parsing flags: %w", err)
@ -83,42 +77,23 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, imageFetcher imageFet
return err
}
invalidUpgradeErr := &compatibility.InvalidUpgradeError{}
err = u.handleServiceUpgrade(cmd, conf, flags)
upgradeErr := &compatibility.InvalidUpgradeError{}
switch {
case errors.As(err, &invalidUpgradeErr):
cmd.PrintErrf("Skipping microservice upgrades: %s\n", err)
case errors.As(err, &upgradeErr):
cmd.PrintErrln(err)
case err != nil:
return fmt.Errorf("service upgrade: %w", err)
return fmt.Errorf("upgrading services: %w", err)
}
err = u.handleK8sUpgrade(cmd.Context(), conf)
skipCtr := 0
err = u.upgrader.UpgradeNodeVersion(cmd.Context(), conf)
switch {
case errors.Is(err, cloudcmd.ErrInProgress):
skipCtr = skipCtr + 1
cmd.PrintErrln("Skipping Kubernetes components upgrades. Another Kubernetes components upgrade is in progress")
case errors.As(err, &invalidUpgradeErr):
skipCtr = skipCtr + 1
cmd.PrintErrf("Skipping Kubernetes components upgrades: %s\n", err)
cmd.PrintErrln("Skipping image & Kubernetes upgrades. Another upgrade is in progress")
case errors.As(err, &upgradeErr):
cmd.PrintErrln(err)
case err != nil:
return fmt.Errorf("upgrading Kubernetes components: %w", err)
}
err = u.handleImageUpgrade(cmd.Context(), conf, imageFetcher)
switch {
case errors.Is(err, cloudcmd.ErrInProgress):
skipCtr = skipCtr + 1
cmd.PrintErrln("Skipping image upgrades. Another image upgrade is in progress")
case errors.As(err, &invalidUpgradeErr):
skipCtr = skipCtr + 1
cmd.PrintErrf("Skipping image upgrades: %s\n", err)
case err != nil:
return fmt.Errorf("upgrading image: %w", err)
}
if skipCtr < 2 {
fmt.Printf("Nodes will restart automatically\n")
return fmt.Errorf("upgrading NodeVersion: %w", err)
}
return nil
@ -140,45 +115,8 @@ func (u *upgradeApplyCmd) handleServiceUpgrade(cmd *cobra.Command, conf *config.
}
err = u.upgrader.UpgradeHelmServices(cmd.Context(), conf, flags.upgradeTimeout, helm.AllowDestructive)
}
if err != nil {
return fmt.Errorf("upgrading helm: %w", err)
}
return nil
}
func (u *upgradeApplyCmd) handleImageUpgrade(ctx context.Context, conf *config.Config, imageFetcher imageFetcher) error {
imageReference, err := imageFetcher.FetchReference(ctx, conf)
if err != nil {
return fmt.Errorf("fetching image reference: %w", err)
}
imageVersion, err := versionsapi.NewVersionFromShortPath(conf.Image, versionsapi.VersionKindImage)
if err != nil {
return fmt.Errorf("parsing version from image short path: %w", err)
}
err = u.upgrader.UpgradeImage(ctx, imageReference, imageVersion.Version, conf.GetMeasurements())
if err != nil {
return fmt.Errorf("upgrading image: %w", err)
}
return nil
}
func (u *upgradeApplyCmd) handleK8sUpgrade(ctx context.Context, conf *config.Config) error {
currentVersion, err := versions.NewValidK8sVersion(conf.KubernetesVersion)
if err != nil {
return fmt.Errorf("getting Kubernetes version: %w", err)
}
versionConfig := versions.VersionConfigs[currentVersion]
err = u.upgrader.UpgradeK8s(ctx, versionConfig.ClusterVersion, versionConfig.KubernetesComponents)
if err != nil {
return fmt.Errorf("upgrading Kubernetes: %w", err)
}
return nil
return err
}
func parseUpgradeApplyFlags(cmd *cobra.Command) (upgradeApplyFlags, error) {
@ -213,11 +151,6 @@ type upgradeApplyFlags struct {
}
type cloudUpgrader interface {
UpgradeImage(ctx context.Context, imageReference, imageVersion string, measurements measurements.M) error
UpgradeNodeVersion(ctx context.Context, conf *config.Config) error
UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error
UpgradeK8s(ctx context.Context, clusterVersion string, components components.Components) error
}
type imageFetcher interface {
FetchReference(ctx context.Context, config *config.Config) (string, error)
}
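The switch in upgradeApply above distinguishes the ErrInProgress sentinel (matched with errors.Is) from an InvalidUpgradeError (matched with errors.As); only other errors abort the command. A stand-alone sketch of that dispatch with simplified stand-in error types:

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for cloudcmd.ErrInProgress and compatibility.InvalidUpgradeError.
var errInProgress = errors.New("upgrade in progress")

type invalidUpgradeError struct{ from, to string }

func (e *invalidUpgradeError) Error() string {
	return fmt.Sprintf("upgrading from %s to %s is not a valid upgrade", e.from, e.to)
}

func handleNodeVersionErr(err error) error {
	upgradeErr := &invalidUpgradeError{}
	switch {
	case errors.Is(err, errInProgress):
		fmt.Println("Skipping image & Kubernetes upgrades. Another upgrade is in progress")
		return nil
	case errors.As(err, &upgradeErr):
		// Invalid upgrades are reported but do not abort the command.
		fmt.Println(err)
		return nil
	case err != nil:
		return fmt.Errorf("upgrading NodeVersion: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(handleNodeVersionErr(errInProgress))
	fmt.Println(handleNodeVersionErr(&invalidUpgradeError{from: "v1.2.3", to: "v1.2.3"}))
	fmt.Println(handleNodeVersionErr(errors.New("some other error")))
}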

View File

@ -12,37 +12,35 @@ import (
"testing"
"time"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/versions/components"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestUpgradeApply(t *testing.T) {
someErr := errors.New("some error")
testCases := map[string]struct {
upgrader stubUpgrader
imageFetcher stubImageFetcher
wantErr bool
}{
"success": {
imageFetcher: stubImageFetcher{
reference: "someReference",
},
},
"fetch error": {
imageFetcher: stubImageFetcher{
fetchReferenceErr: errors.New("error"),
upgrader: stubUpgrader{},
},
"nodeVersion some error": {
upgrader: stubUpgrader{nodeVersionErr: someErr},
wantErr: true,
},
"upgrade error": {
upgrader: stubUpgrader{imageErr: errors.New("error")},
"nodeVersion in progress error": {
upgrader: stubUpgrader{nodeVersionErr: cloudcmd.ErrInProgress},
},
"helm other error": {
upgrader: stubUpgrader{helmErr: someErr},
wantErr: true,
},
}
@ -60,7 +58,7 @@ func TestUpgradeApply(t *testing.T) {
require.NoError(handler.WriteYAML(constants.ConfigFilename, cfg))
upgrader := upgradeApplyCmd{upgrader: tc.upgrader, log: logger.NewTest(t)}
err := upgrader.upgradeApply(cmd, &tc.imageFetcher, handler)
err := upgrader.upgradeApply(cmd, handler)
if tc.wantErr {
assert.Error(err)
} else {
@ -71,28 +69,14 @@ func TestUpgradeApply(t *testing.T) {
}
type stubUpgrader struct {
imageErr error
nodeVersionErr error
helmErr error
k8sErr error
}
func (u stubUpgrader) UpgradeImage(context.Context, string, string, measurements.M) error {
return u.imageErr
func (u stubUpgrader) UpgradeNodeVersion(ctx context.Context, conf *config.Config) error {
return u.nodeVersionErr
}
func (u stubUpgrader) UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error {
return u.helmErr
}
func (u stubUpgrader) UpgradeK8s(ctx context.Context, clusterVersion string, components components.Components) error {
return u.k8sErr
}
type stubImageFetcher struct {
reference string
fetchReferenceErr error
}
func (f *stubImageFetcher) FetchReference(_ context.Context, _ *config.Config) (string, error) {
return f.reference, f.fetchReferenceErr
}

View File

@ -8,6 +8,7 @@ package helm
import (
"context"
"errors"
"fmt"
"strings"
"time"
@ -18,7 +19,6 @@ import (
"github.com/edgelesssys/constellation/v2/internal/deploy/helm"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/versions"
"github.com/pkg/errors"
"github.com/spf13/afero"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
@ -73,23 +73,41 @@ func NewClient(client crdClient, kubeConfigPath, helmNamespace string, log debug
// If the CLI receives an interrupt signal it will cancel the context.
// Canceling the context will prompt helm to abort and roll back the ongoing upgrade.
func (c *Client) Upgrade(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error {
if err := c.upgradeRelease(ctx, timeout, config, ciliumPath, ciliumReleaseName, false, allowDestructive); err != nil {
return fmt.Errorf("upgrading cilium: %w", err)
upgradeErrs := []error{}
invalidUpgrade := &compatibility.InvalidUpgradeError{}
err := c.upgradeRelease(ctx, timeout, config, ciliumPath, ciliumReleaseName, false, allowDestructive)
switch {
case errors.As(err, &invalidUpgrade):
upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping Cilium upgrade: %w", err))
case err != nil:
return fmt.Errorf("upgrading cilium: %s", err)
}
if err := c.upgradeRelease(ctx, timeout, config, certManagerPath, certManagerReleaseName, false, allowDestructive); err != nil {
err = c.upgradeRelease(ctx, timeout, config, certManagerPath, certManagerReleaseName, false, allowDestructive)
switch {
case errors.As(err, &invalidUpgrade):
upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping cert-manager upgrade: %w", err))
case err != nil:
return fmt.Errorf("upgrading cert-manager: %w", err)
}
if err := c.upgradeRelease(ctx, timeout, config, conOperatorsPath, conOperatorsReleaseName, true, allowDestructive); err != nil {
err = c.upgradeRelease(ctx, timeout, config, conOperatorsPath, conOperatorsReleaseName, true, allowDestructive)
switch {
case errors.As(err, &invalidUpgrade):
upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping constellation operators upgrade: %w", err))
case err != nil:
return fmt.Errorf("upgrading constellation operators: %w", err)
}
if err := c.upgradeRelease(ctx, timeout, config, conServicesPath, conServicesReleaseName, false, allowDestructive); err != nil {
err = c.upgradeRelease(ctx, timeout, config, conServicesPath, conServicesReleaseName, false, allowDestructive)
switch {
case errors.As(err, &invalidUpgrade):
upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping constellation-services upgrade: %w", err))
case err != nil:
return fmt.Errorf("upgrading constellation-services: %w", err)
}
return nil
return errors.Join(upgradeErrs...)
}
// Versions queries the cluster for running versions and returns a map of releaseName -> version.
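Both this helm Upgrade and UpgradeNodeVersion above rely on errors.Join (Go 1.20): joining only nil values yields nil, so a run with nothing skipped still reports success, while collected skip errors are surfaced together and stay individually matchable. A small illustration:

package main

import (
	"errors"
	"fmt"
)

func main() {
	// All inputs nil: Join returns nil, so nothing was skipped.
	fmt.Println(errors.Join(nil, nil) == nil) // true

	// Non-nil inputs are combined; each remains matchable on the joined error.
	errCilium := errors.New("skipping Cilium upgrade: same version")
	errCertManager := errors.New("skipping cert-manager upgrade: same version")
	joined := errors.Join(errCilium, nil, errCertManager)
	fmt.Println(joined)
	fmt.Println(errors.Is(joined, errCertManager)) // true
}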

View File

@ -41,12 +41,12 @@ func NewInvalidUpgradeError(from string, to string, innerErr error) *InvalidUpgr
}
// Unwrap returns the inner error, which is nil in this case.
func (e *InvalidUpgradeError) Unwrap() error {
func (e InvalidUpgradeError) Unwrap() error {
return e.innerErr
}
// Error returns the String representation of this error.
func (e *InvalidUpgradeError) Error() string {
func (e InvalidUpgradeError) Error() string {
return fmt.Sprintf("upgrading from %s to %s is not a valid upgrade: %s", e.from, e.to, e.innerErr)
}
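With Error and Unwrap changed to value receivers, both InvalidUpgradeError and *InvalidUpgradeError satisfy the error interface, and errors.As/errors.Is keep working through the fmt.Errorf("%w", ...) wrapping used by the callers above. A small self-contained demonstration with a simplified copy of the type (the inner error message is made up):

package main

import (
	"errors"
	"fmt"
)

// invalidUpgradeError mirrors the shape of compatibility.InvalidUpgradeError,
// with value receivers as in the change above.
type invalidUpgradeError struct {
	from, to string
	innerErr error
}

func (e invalidUpgradeError) Error() string {
	return fmt.Sprintf("upgrading from %s to %s is not a valid upgrade: %s", e.from, e.to, e.innerErr)
}

func (e invalidUpgradeError) Unwrap() error { return e.innerErr }

func main() {
	// With value receivers, both the value and the pointer satisfy error.
	var _ error = invalidUpgradeError{}
	var _ error = &invalidUpgradeError{}

	inner := errors.New("only minor upgrades are allowed")
	err := fmt.Errorf("skipping image upgrades: %w",
		&invalidUpgradeError{from: "v1.2.3", to: "v1.2.3", innerErr: inner})

	target := &invalidUpgradeError{}
	fmt.Println(errors.As(err, &target)) // true: unwraps through %w to the pointer
	fmt.Println(errors.Is(err, inner))   // true: Unwrap exposes the inner error
}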