cli: refactor upgrade apply cmd to match name

* `upgrade apply` will try to make the locally configured and
actual version in the cluster match by applying necessary
upgrades.
* Skip image or kubernetes upgrades if one is already
in progress.
* Skip downgrades/equal-as-running versions
* Move NodeVersionResourceName constant from operators
to internal as it's needed in the CLI.
This commit is contained in:
Otto Bittner 2023-02-09 15:54:12 +01:00
parent 3cebd68c24
commit 50646b2a10
18 changed files with 611 additions and 241 deletions

View File

@ -13,6 +13,9 @@ ENV PATH ${PATH}:/usr/local/go/bin
# Download go dependencies
WORKDIR /constellation/
# Necessary to make `go mod download all` work while having a local replace rule in the root-go.mod.
COPY operators/constellation-node-operator/api/go.mod ./operators/constellation-node-operator/api/go.mod
COPY operators/constellation-node-operator/api/go.sum ./operators/constellation-node-operator/api/go.sum
COPY go.mod ./
COPY go.sum ./
RUN go mod download all

View File

@ -16,18 +16,44 @@ import (
"github.com/edgelesssys/constellation/v2/cli/internal/helm"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/compatibility"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
internalk8s "github.com/edgelesssys/constellation/v2/internal/kubernetes"
"github.com/edgelesssys/constellation/v2/internal/kubernetes/kubectl"
"github.com/edgelesssys/constellation/v2/internal/versions/components"
updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
// ErrInProgress signals that an upgrade is in progress inside the cluster.
var ErrInProgress = errors.New("upgrade in progress")

// InvalidUpgradeError presents an invalid upgrade. It wraps the source and destination version for improved debuggability.
type InvalidUpgradeError struct {
	from     string
	to       string
	innerErr error
}

// Unwrap returns the wrapped error explaining why the upgrade is invalid.
func (e *InvalidUpgradeError) Unwrap() error {
	return e.innerErr
}

// Error returns the string representation of this error.
func (e *InvalidUpgradeError) Error() string {
	return fmt.Sprintf("upgrading from %s to %s is not a valid upgrade: %s", e.from, e.to, e.innerErr)
}
// Upgrader handles upgrading the cluster's components using the CLI.
type Upgrader struct {
stableInterface stableInterface
@ -35,6 +61,7 @@ type Upgrader struct {
helmClient helmInterface
outWriter io.Writer
log debugLog
}
// NewUpgrader returns a new Upgrader.
@ -65,75 +92,144 @@ func NewUpgrader(outWriter io.Writer, log debugLog) (*Upgrader, error) {
dynamicInterface: &dynamicClient{client: unstructuredClient},
helmClient: helmClient,
outWriter: outWriter,
log: log,
}, nil
}
// UpgradeImage upgrades the cluster to the given measurements and image.
func (u *Upgrader) UpgradeImage(ctx context.Context, imageReference, imageVersion string, measurements measurements.M) error {
if err := u.updateMeasurements(ctx, measurements); err != nil {
func (u *Upgrader) UpgradeImage(ctx context.Context, newImageReference, newImageVersion string, newMeasurements measurements.M) error {
nodeVersion, err := u.getConstellationVersion(ctx)
if err != nil {
return fmt.Errorf("retrieving current image: %w", err)
}
currentImageVersion := nodeVersion.Spec.ImageVersion
if err := compatibility.IsValidUpgrade(currentImageVersion, newImageVersion); err != nil {
return &InvalidUpgradeError{from: currentImageVersion, to: newImageVersion, innerErr: err}
}
if imageUpgradeInProgress(nodeVersion) {
return ErrInProgress
}
if err := u.updateMeasurements(ctx, newMeasurements); err != nil {
return fmt.Errorf("updating measurements: %w", err)
}
if err := u.updateImage(ctx, imageReference, imageVersion); err != nil {
if err := u.updateImage(ctx, nodeVersion, newImageReference, newImageVersion); err != nil {
return fmt.Errorf("updating image: %w", err)
}
return nil
}
// GetCurrentImage returns the raw constellation-version object together with
// the currently used image version of the cluster.
func (u *Upgrader) GetCurrentImage(ctx context.Context) (*unstructured.Unstructured, string, error) {
	return u.getFromConstellationVersion(ctx, "imageVersion")
}

// GetCurrentKubernetesVersion returns the raw constellation-version object
// together with the currently used Kubernetes version.
func (u *Upgrader) GetCurrentKubernetesVersion(ctx context.Context) (*unstructured.Unstructured, string, error) {
	return u.getFromConstellationVersion(ctx, "kubernetesClusterVersion")
}
// getFromConstellationVersion queries the constellation-version object and
// extracts the string value of the given spec field. The raw object is
// returned alongside the value so callers can modify and write it back.
func (u *Upgrader) getFromConstellationVersion(ctx context.Context, fieldName string) (*unstructured.Unstructured, string, error) {
	versionStruct, err := u.dynamicInterface.getCurrent(ctx, "constellation-version")
	if err != nil {
		return nil, "", err
	}

	spec, ok := versionStruct.Object["spec"]
	if !ok {
		return nil, "", errors.New("spec missing")
	}

	// All malformed-spec shapes map to the same error value.
	errInvalidSpec := errors.New("invalid spec")
	specMap, ok := spec.(map[string]any)
	if !ok {
		return nil, "", errInvalidSpec
	}
	value, ok := specMap[fieldName]
	if !ok {
		return nil, "", errInvalidSpec
	}
	valueString, ok := value.(string)
	if !ok {
		return nil, "", errInvalidSpec
	}

	return versionStruct, valueString, nil
}
// UpgradeHelmServices upgrades the cluster's Helm-managed services by
// delegating to the Helm client with the given timeout and destructive-change
// policy.
func (u *Upgrader) UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error {
	return u.helmClient.Upgrade(ctx, config, timeout, allowDestructive)
}
// UpgradeK8s upgrades the Kubernetes cluster version and the installed
// components to matching versions.
//
// It returns an *InvalidUpgradeError if the new version is not a valid upgrade
// of the running version, and ErrInProgress if another cluster-version upgrade
// is already running.
func (u *Upgrader) UpgradeK8s(ctx context.Context, newClusterVersion string, components components.Components) error {
	nodeVersion, err := u.getConstellationVersion(ctx)
	if err != nil {
		return fmt.Errorf("getting kubernetesClusterVersion: %w", err)
	}
	if err := compatibility.IsValidUpgrade(nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion); err != nil {
		return &InvalidUpgradeError{from: nodeVersion.Spec.KubernetesClusterVersion, to: newClusterVersion, innerErr: err}
	}
	if k8sUpgradeInProgress(nodeVersion) {
		return ErrInProgress
	}

	u.log.Debugf("Upgrading cluster's Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)
	configMap, err := internalk8s.ConstructK8sComponentsCM(components, newClusterVersion)
	if err != nil {
		return fmt.Errorf("constructing k8s-components ConfigMap: %w", err)
	}

	_, err = u.stableInterface.createConfigMap(ctx, &configMap)
	// If the map already exists we can use that map and assume it has the same content as 'configMap'.
	if err != nil && !k8serrors.IsAlreadyExists(err) {
		return fmt.Errorf("creating k8s-components ConfigMap: %w. %T", err, err)
	}

	// Point the NodeVersion resource at the new components ConfigMap and version.
	nodeVersion.Spec.KubernetesComponentsReference = configMap.ObjectMeta.Name
	nodeVersion.Spec.KubernetesClusterVersion = newClusterVersion

	raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
	if err != nil {
		return fmt.Errorf("converting nodeVersion to unstructured: %w", err)
	}

	u.log.Debugf("Triggering Kubernetes version upgrade now")
	// Send the updated NodeVersion resource
	updated, err := u.dynamicInterface.update(ctx, &unstructured.Unstructured{Object: raw})
	if err != nil {
		return fmt.Errorf("updating NodeVersion: %w", err)
	}

	// Verify the update worked as expected
	updatedSpec, ok := updated.Object["spec"]
	if !ok {
		return errors.New("invalid updated NodeVersion spec")
	}
	updatedMap, ok := updatedSpec.(map[string]any)
	if !ok {
		return errors.New("invalid updated NodeVersion spec")
	}
	if updatedMap["kubernetesComponentsReference"] != configMap.ObjectMeta.Name || updatedMap["kubernetesClusterVersion"] != newClusterVersion {
		return errors.New("failed to update NodeVersion resource")
	}

	fmt.Fprintf(u.outWriter, "Successfully updated the cluster's Kubernetes version to %s\n", newClusterVersion)
	return nil
}
// KubernetesVersion returns the version of Kubernetes the Constellation is
// currently running on, as reported by the cluster's API server.
func (u *Upgrader) KubernetesVersion() (string, error) {
	return u.stableInterface.kubernetesVersion()
}
// CurrentImage returns the image version the cluster is currently running,
// read from the constellation-version NodeVersion resource.
func (u *Upgrader) CurrentImage(ctx context.Context) (string, error) {
	nv, err := u.getConstellationVersion(ctx)
	if err != nil {
		return "", fmt.Errorf("getting constellation-version: %w", err)
	}
	return nv.Spec.ImageVersion, nil
}
// CurrentKubernetesVersion returns the Kubernetes version the cluster is
// currently running, read from the constellation-version NodeVersion resource.
func (u *Upgrader) CurrentKubernetesVersion(ctx context.Context) (string, error) {
	nv, err := u.getConstellationVersion(ctx)
	if err != nil {
		return "", fmt.Errorf("getting constellation-version: %w", err)
	}
	return nv.Spec.KubernetesClusterVersion, nil
}
// getConstellationVersion retrieves the constellation-version object and
// converts it into a typed NodeVersion resource.
func (u *Upgrader) getConstellationVersion(ctx context.Context) (updatev1alpha1.NodeVersion, error) {
	raw, err := u.dynamicInterface.getCurrent(ctx, "constellation-version")
	if err != nil {
		return updatev1alpha1.NodeVersion{}, err
	}
	var nodeVersion updatev1alpha1.NodeVersion
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(raw.UnstructuredContent(), &nodeVersion); err != nil {
		return updatev1alpha1.NodeVersion{}, fmt.Errorf("converting unstructured to NodeVersion: %w", err)
	}
	return nodeVersion, nil
}
func (u *Upgrader) updateMeasurements(ctx context.Context, newMeasurements measurements.M) error {
existingConf, err := u.stableInterface.getCurrent(ctx, constants.JoinConfigMap)
existingConf, err := u.stableInterface.getCurrentConfigMap(ctx, constants.JoinConfigMap)
if err != nil {
return fmt.Errorf("retrieving current measurements: %w", err)
}
if _, ok := existingConf.Data[constants.MeasurementsFilename]; !ok {
return errors.New("measurements missing from join-config")
}
var currentMeasurements measurements.M
if err := json.Unmarshal([]byte(existingConf.Data[constants.MeasurementsFilename]), &currentMeasurements); err != nil {
return fmt.Errorf("retrieving current measurements: %w", err)
@ -158,7 +254,8 @@ func (u *Upgrader) updateMeasurements(ctx context.Context, newMeasurements measu
return fmt.Errorf("marshaling measurements: %w", err)
}
existingConf.Data[constants.MeasurementsFilename] = string(measurementsJSON)
_, err = u.stableInterface.update(ctx, existingConf)
u.log.Debugf("Triggering measurements config map update now")
_, err = u.stableInterface.updateConfigMap(ctx, existingConf)
if err != nil {
return fmt.Errorf("setting new measurements: %w", err)
}
@ -167,35 +264,60 @@ func (u *Upgrader) updateMeasurements(ctx context.Context, newMeasurements measu
return nil
}
func (u *Upgrader) updateImage(ctx context.Context, imageReference, imageVersion string) error {
currentImage, currentImageVersion, err := u.GetCurrentImage(ctx)
func (u *Upgrader) updateImage(ctx context.Context, nodeVersion updatev1alpha1.NodeVersion, newImageRef, newImageVersion string) error {
u.log.Debugf("Upgrading cluster's image version from %s to %s", nodeVersion.Spec.ImageVersion, newImageVersion)
nodeVersion.Spec.ImageReference = newImageRef
nodeVersion.Spec.ImageVersion = newImageVersion
raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
if err != nil {
return fmt.Errorf("retrieving current image: %w", err)
return fmt.Errorf("converting nodeVersion to unstructured: %w", err)
}
if currentImageVersion == imageVersion {
fmt.Fprintln(u.outWriter, "Cluster is already using the chosen image, skipping image upgrade")
return nil
}
currentImage.Object["spec"].(map[string]any)["image"] = imageReference
currentImage.Object["spec"].(map[string]any)["imageVersion"] = imageVersion
if _, err := u.dynamicInterface.update(ctx, currentImage); err != nil {
u.log.Debugf("Triggering image version upgrade now")
if _, err := u.dynamicInterface.update(ctx, &unstructured.Unstructured{Object: raw}); err != nil {
return fmt.Errorf("setting new image: %w", err)
}
fmt.Fprintln(u.outWriter, "Successfully updated the cluster's image, upgrades will be applied automatically")
fmt.Fprintf(u.outWriter, "Successfully updated the cluster's image version to %s\n", newImageVersion)
return nil
}
// k8sUpgradeInProgress reports whether a Kubernetes cluster-version upgrade is
// currently running, indicated either by the ActiveClusterVersionUpgrade flag
// or an Outdated condition on the NodeVersion status.
func k8sUpgradeInProgress(nodeVersion updatev1alpha1.NodeVersion) bool {
	if nodeVersion.Status.ActiveClusterVersionUpgrade {
		return true
	}
	for _, cond := range nodeVersion.Status.Conditions {
		if cond.Type == updatev1alpha1.ConditionOutdated && cond.Status == metav1.ConditionTrue {
			return true
		}
	}
	return false
}
// imageUpgradeInProgress reports whether an image upgrade is currently
// running, indicated by an Outdated condition on the NodeVersion status.
func imageUpgradeInProgress(nodeVersion updatev1alpha1.NodeVersion) bool {
	conditions := nodeVersion.Status.Conditions
	for i := range conditions {
		outdated := conditions[i].Type == updatev1alpha1.ConditionOutdated
		if outdated && conditions[i].Status == metav1.ConditionTrue {
			return true
		}
	}
	return false
}
// dynamicInterface abstracts read and update access to unstructured
// Kubernetes resources, such as the constellation-version NodeVersion object.
type dynamicInterface interface {
	getCurrent(ctx context.Context, name string) (*unstructured.Unstructured, error)
	update(ctx context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error)
}
type stableInterface interface {
getCurrent(ctx context.Context, name string) (*corev1.ConfigMap, error)
update(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error)
getCurrentConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error)
updateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error)
createConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error)
kubernetesVersion() (string, error)
}
@ -225,16 +347,20 @@ type stableClient struct {
client kubernetes.Interface
}
// getCurrent returns the cluster's expected measurements.
func (u *stableClient) getCurrent(ctx context.Context, name string) (*corev1.ConfigMap, error) {
// getCurrentConfigMap returns the ConfigMap with the given name from the
// constellation namespace.
func (u *stableClient) getCurrentConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error) {
	return u.client.CoreV1().ConfigMaps(constants.ConstellationNamespace).Get(ctx, name, metav1.GetOptions{})
}
// update updates the cluster's expected measurements in Kubernetes.
func (u *stableClient) update(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
// updateConfigMap updates the given ConfigMap in the constellation namespace.
func (u *stableClient) updateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
	return u.client.CoreV1().ConfigMaps(constants.ConstellationNamespace).Update(ctx, configMap, metav1.UpdateOptions{})
}
// createConfigMap creates the given ConfigMap in the constellation namespace.
func (u *stableClient) createConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
	return u.client.CoreV1().ConfigMaps(constants.ConstellationNamespace).Create(ctx, configMap, metav1.CreateOptions{})
}
func (u *stableClient) kubernetesVersion() (string, error) {
serverVersion, err := u.client.Discovery().ServerVersion()
if err != nil {

View File

@ -7,31 +7,244 @@ SPDX-License-Identifier: AGPL-3.0-only
package cloudcmd
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"testing"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/versions/components"
updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
)
// TestUpgradeK8s exercises Upgrader.UpgradeK8s: a valid version bump succeeds,
// while same-version "upgrades", downgrades, a missing constellation-version
// object, an in-progress upgrade, and ConfigMap creation failures each return
// the expected error type.
func TestUpgradeK8s(t *testing.T) {
	someErr := errors.New("some error")
	testCases := map[string]struct {
		stable                      stubStableClient
		conditions                  []metav1.Condition
		activeClusterVersionUpgrade bool
		newClusterVersion           string
		currentClusterVersion       string
		components                  components.Components
		getErr                      error
		assertCorrectError          func(t *testing.T, err error) bool
		wantErr                     bool
	}{
		"success": {
			currentClusterVersion: "v1.2.2",
			newClusterVersion:     "v1.2.3",
		},
		"not an upgrade": {
			currentClusterVersion: "v1.2.3",
			newClusterVersion:     "v1.2.3",
			wantErr:               true,
			assertCorrectError: func(t *testing.T, err error) bool {
				target := &InvalidUpgradeError{}
				return assert.ErrorAs(t, err, &target)
			},
		},
		"downgrade": {
			currentClusterVersion: "v1.2.3",
			newClusterVersion:     "v1.2.2",
			wantErr:               true,
			assertCorrectError: func(t *testing.T, err error) bool {
				target := &InvalidUpgradeError{}
				return assert.ErrorAs(t, err, &target)
			},
		},
		"no constellation-version object": {
			getErr:  someErr,
			wantErr: true,
			assertCorrectError: func(t *testing.T, err error) bool {
				return assert.ErrorIs(t, err, someErr)
			},
		},
		"upgrade in progress": {
			currentClusterVersion: "v1.2.2",
			newClusterVersion:     "v1.2.3",
			conditions: []metav1.Condition{{
				Type:   updatev1alpha1.ConditionOutdated,
				Status: metav1.ConditionTrue,
			}},
			wantErr: true,
			assertCorrectError: func(t *testing.T, err error) bool {
				return assert.ErrorIs(t, err, ErrInProgress)
			},
		},
		"configmap create fails": {
			currentClusterVersion: "v1.2.2",
			newClusterVersion:     "v1.2.3",
			stable: stubStableClient{
				createErr: someErr,
			},
			wantErr: true,
			assertCorrectError: func(t *testing.T, err error) bool {
				return assert.ErrorIs(t, err, someErr)
			},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)
			// Build the NodeVersion resource the stub dynamic client serves.
			nodeVersion := updatev1alpha1.NodeVersion{
				Spec: updatev1alpha1.NodeVersionSpec{
					KubernetesClusterVersion: tc.currentClusterVersion,
				},
				Status: updatev1alpha1.NodeVersionStatus{
					Conditions:                  tc.conditions,
					ActiveClusterVersionUpgrade: tc.activeClusterVersionUpgrade,
				},
			}
			unstrNodeVersion, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
			require.NoError(err)
			upgrader := Upgrader{
				stableInterface:  &tc.stable,
				dynamicInterface: &stubDynamicClient{object: &unstructured.Unstructured{Object: unstrNodeVersion}, getErr: tc.getErr},
				log:              logger.NewTest(t),
				outWriter:        io.Discard,
			}

			err = upgrader.UpgradeK8s(context.Background(), tc.newClusterVersion, tc.components)
			if tc.wantErr {
				tc.assertCorrectError(t, err)
				return
			}
			assert.NoError(err)
		})
	}
}
// TestUpgradeImage exercises Upgrader.UpgradeImage: a valid version bump
// triggers an update of the NodeVersion resource, while same-version upgrades,
// downgrades, in-progress upgrades, and retrieval errors do not.
func TestUpgradeImage(t *testing.T) {
	someErr := errors.New("some error")
	testCases := map[string]struct {
		stable              *stubStableClient
		conditions          []metav1.Condition
		currentImageVersion string
		newImageVersion     string
		getErr              error
		wantErr             bool
		wantUpdate          bool
		assertCorrectError  func(t *testing.T, err error) bool
	}{
		"success": {
			currentImageVersion: "v1.2.2",
			newImageVersion:     "v1.2.3",
			stable: &stubStableClient{
				configMap: &corev1.ConfigMap{
					Data: map[string]string{
						constants.MeasurementsFilename: `{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`,
					},
				},
			},
			wantUpdate: true,
		},
		"not an upgrade": {
			currentImageVersion: "v1.2.2",
			newImageVersion:     "v1.2.2",
			wantErr:             true,
			assertCorrectError: func(t *testing.T, err error) bool {
				target := &InvalidUpgradeError{}
				return assert.ErrorAs(t, err, &target)
			},
		},
		"downgrade": {
			currentImageVersion: "v1.2.2",
			newImageVersion:     "v1.2.1",
			wantErr:             true,
			assertCorrectError: func(t *testing.T, err error) bool {
				target := &InvalidUpgradeError{}
				return assert.ErrorAs(t, err, &target)
			},
		},
		"upgrade in progress": {
			currentImageVersion: "v1.2.2",
			newImageVersion:     "v1.2.3",
			conditions: []metav1.Condition{{
				Type:   updatev1alpha1.ConditionOutdated,
				Status: metav1.ConditionTrue,
			}},
			wantErr: true,
			assertCorrectError: func(t *testing.T, err error) bool {
				return assert.ErrorIs(t, err, ErrInProgress)
			},
		},
		"get error": {
			getErr:  someErr,
			wantErr: true,
			assertCorrectError: func(t *testing.T, err error) bool {
				return assert.ErrorIs(t, err, someErr)
			},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)
			// Build the NodeVersion resource the stub dynamic client serves.
			nodeVersion := updatev1alpha1.NodeVersion{
				Spec: updatev1alpha1.NodeVersionSpec{
					ImageVersion: tc.currentImageVersion,
				},
				Status: updatev1alpha1.NodeVersionStatus{
					Conditions: tc.conditions,
				},
			}
			unstrNodeVersion, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
			require.NoError(err)
			dynamicClient := &stubDynamicClient{object: &unstructured.Unstructured{Object: unstrNodeVersion}, getErr: tc.getErr}
			upgrader := Upgrader{
				stableInterface:  tc.stable,
				dynamicInterface: dynamicClient,
				log:              logger.NewTest(t),
				outWriter:        io.Discard,
			}

			err = upgrader.UpgradeImage(context.Background(), "", tc.newImageVersion, nil)

			// Check upgrades first because if we checked err first, UpgradeImage may error due to other reasons and still trigger an upgrade.
			if tc.wantUpdate {
				assert.NotNil(dynamicClient.updatedObject)
			} else {
				assert.Nil(dynamicClient.updatedObject)
			}

			if tc.wantErr {
				assert.Error(err)
				tc.assertCorrectError(t, err)
				return
			}
			assert.NoError(err)
		})
	}
}
func TestUpdateMeasurements(t *testing.T) {
someErr := errors.New("error")
testCases := map[string]struct {
updater *stubClientInterface
updater *stubStableClient
newMeasurements measurements.M
wantUpdate bool
wantErr bool
}{
"success": {
updater: &stubClientInterface{
oldMeasurements: &corev1.ConfigMap{
updater: &stubStableClient{
configMap: &corev1.ConfigMap{
Data: map[string]string{
constants.MeasurementsFilename: `{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`,
},
@ -43,8 +256,8 @@ func TestUpdateMeasurements(t *testing.T) {
wantUpdate: true,
},
"measurements are the same": {
updater: &stubClientInterface{
oldMeasurements: &corev1.ConfigMap{
updater: &stubStableClient{
configMap: &corev1.ConfigMap{
Data: map[string]string{
constants.MeasurementsFilename: `{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`,
},
@ -55,8 +268,8 @@ func TestUpdateMeasurements(t *testing.T) {
},
},
"trying to set warnOnly to true results in error": {
updater: &stubClientInterface{
oldMeasurements: &corev1.ConfigMap{
updater: &stubStableClient{
configMap: &corev1.ConfigMap{
Data: map[string]string{
constants.MeasurementsFilename: `{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`,
},
@ -68,8 +281,8 @@ func TestUpdateMeasurements(t *testing.T) {
wantErr: true,
},
"setting warnOnly to false is allowed": {
updater: &stubClientInterface{
oldMeasurements: &corev1.ConfigMap{
updater: &stubStableClient{
configMap: &corev1.ConfigMap{
Data: map[string]string{
constants.MeasurementsFilename: `{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":true}}`,
},
@ -81,12 +294,12 @@ func TestUpdateMeasurements(t *testing.T) {
wantUpdate: true,
},
"getCurrent error": {
updater: &stubClientInterface{getErr: someErr},
updater: &stubStableClient{getErr: someErr},
wantErr: true,
},
"update error": {
updater: &stubClientInterface{
oldMeasurements: &corev1.ConfigMap{
updater: &stubStableClient{
configMap: &corev1.ConfigMap{
Data: map[string]string{
constants.MeasurementsFilename: `{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`,
},
@ -103,7 +316,8 @@ func TestUpdateMeasurements(t *testing.T) {
upgrader := &Upgrader{
stableInterface: tc.updater,
outWriter: &bytes.Buffer{},
outWriter: io.Discard,
log: logger.NewTest(t),
}
err := upgrader.updateMeasurements(context.Background(), tc.newMeasurements)
@ -116,127 +330,39 @@ func TestUpdateMeasurements(t *testing.T) {
if tc.wantUpdate {
newMeasurementsJSON, err := json.Marshal(tc.newMeasurements)
require.NoError(t, err)
assert.JSONEq(string(newMeasurementsJSON), tc.updater.updatedMeasurements.Data[constants.MeasurementsFilename])
assert.JSONEq(string(newMeasurementsJSON), tc.updater.updatedConfigMap.Data[constants.MeasurementsFilename])
} else {
assert.Nil(tc.updater.updatedMeasurements)
assert.Nil(tc.updater.updatedConfigMap)
}
})
}
}
type stubClientInterface struct {
oldMeasurements *corev1.ConfigMap
updatedMeasurements *corev1.ConfigMap
k8sVersion string
getErr error
updateErr error
k8sVersionErr error
}
func (u *stubClientInterface) getCurrent(context.Context, string) (*corev1.ConfigMap, error) {
return u.oldMeasurements, u.getErr
}
func (u *stubClientInterface) update(_ context.Context, updatedMeasurements *corev1.ConfigMap) (*corev1.ConfigMap, error) {
u.updatedMeasurements = updatedMeasurements
return nil, u.updateErr
}
func (u *stubClientInterface) kubernetesVersion() (string, error) {
return u.k8sVersion, u.k8sVersionErr
}
func TestUpdateImage(t *testing.T) {
someErr := errors.New("error")
testCases := map[string]struct {
updater *stubImageUpdater
nodeVersion updatev1alpha1.NodeVersion
newImageReference string
newImageVersion string
oldImageVersion string
updateErr error
wantUpdate bool
wantErr bool
}{
"success": {
updater: &stubImageUpdater{
setImage: &unstructured.Unstructured{
Object: map[string]any{
"spec": map[string]any{
"image": "old-image-ref",
"imageVersion": "old-image-ver",
},
},
nodeVersion: updatev1alpha1.NodeVersion{
Spec: updatev1alpha1.NodeVersionSpec{
ImageReference: "old-image-ref",
ImageVersion: "old-image-ver",
},
},
newImageReference: "new-image-ref",
newImageVersion: "new-image-ver",
wantUpdate: true,
},
"image is the same": {
updater: &stubImageUpdater{
setImage: &unstructured.Unstructured{
Object: map[string]any{
"spec": map[string]any{
"image": "old-image-ref",
"imageVersion": "old-image-ver",
},
},
},
},
newImageReference: "old-image-ref",
newImageVersion: "old-image-ver",
},
"getCurrent error": {
updater: &stubImageUpdater{getErr: someErr},
wantErr: true,
},
"update error": {
updater: &stubImageUpdater{
setImage: &unstructured.Unstructured{
Object: map[string]any{
"spec": map[string]any{
"image": "old-image-ref",
"imageVersion": "old-image-ver",
},
},
},
updateErr: someErr,
},
newImageReference: "new-image-ref",
newImageVersion: "new-image-ver",
wantErr: true,
},
"no spec": {
updater: &stubImageUpdater{
setImage: &unstructured.Unstructured{
Object: map[string]any{},
},
},
newImageReference: "new-image-ref",
newImageVersion: "new-image-ver",
wantErr: true,
},
"not a map": {
updater: &stubImageUpdater{
setImage: &unstructured.Unstructured{
Object: map[string]any{
"spec": "not a map",
},
},
},
newImageReference: "new-image-ref",
newImageVersion: "new-image-ver",
wantErr: true,
},
"no spec.image": {
updater: &stubImageUpdater{
setImage: &unstructured.Unstructured{
Object: map[string]any{
"spec": map[string]any{},
},
},
},
newImageReference: "new-image-ref",
newImageVersion: "new-image-ver",
wantErr: true,
updateErr: someErr,
wantErr: true,
},
}
@ -244,12 +370,14 @@ func TestUpdateImage(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
upgradeClient := &stubDynamicClient{updateErr: tc.updateErr}
upgrader := &Upgrader{
dynamicInterface: tc.updater,
outWriter: &bytes.Buffer{},
dynamicInterface: upgradeClient,
outWriter: io.Discard,
log: logger.NewTest(t),
}
err := upgrader.updateImage(context.Background(), tc.newImageReference, tc.newImageVersion)
err := upgrader.updateImage(context.Background(), tc.nodeVersion, tc.newImageReference, tc.newImageVersion)
if tc.wantErr {
assert.Error(err)
@ -258,27 +386,55 @@ func TestUpdateImage(t *testing.T) {
assert.NoError(err)
if tc.wantUpdate {
assert.Equal(tc.newImageReference, tc.updater.updatedImage.Object["spec"].(map[string]any)["image"])
assert.Equal(tc.newImageVersion, tc.updater.updatedImage.Object["spec"].(map[string]any)["imageVersion"])
assert.Equal(tc.newImageReference, upgradeClient.updatedObject.Object["spec"].(map[string]any)["image"])
assert.Equal(tc.newImageVersion, upgradeClient.updatedObject.Object["spec"].(map[string]any)["imageVersion"])
} else {
assert.Nil(tc.updater.updatedImage)
assert.Nil(upgradeClient.updatedObject)
}
})
}
}
type stubImageUpdater struct {
setImage *unstructured.Unstructured
updatedImage *unstructured.Unstructured
getErr error
updateErr error
// stubDynamicClient is a test double for dynamicInterface.
type stubDynamicClient struct {
	object        *unstructured.Unstructured // returned by getCurrent
	updatedObject *unstructured.Unstructured // records the last object passed to update
	getErr        error
	updateErr     error
}
func (u *stubImageUpdater) getCurrent(ctx context.Context, name string) (*unstructured.Unstructured, error) {
return u.setImage, u.getErr
// getCurrent returns the configured object and error.
func (u *stubDynamicClient) getCurrent(ctx context.Context, name string) (*unstructured.Unstructured, error) {
	return u.object, u.getErr
}
func (u *stubImageUpdater) update(_ context.Context, updatedImage *unstructured.Unstructured) (*unstructured.Unstructured, error) {
u.updatedImage = updatedImage
return nil, u.updateErr
// update records the given object and returns it with the configured error.
func (u *stubDynamicClient) update(_ context.Context, updatedObject *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	u.updatedObject = updatedObject
	return u.updatedObject, u.updateErr
}
// stubStableClient is a test double for stableInterface.
type stubStableClient struct {
	configMap        *corev1.ConfigMap // returned by getCurrentConfigMap; overwritten by createConfigMap
	updatedConfigMap *corev1.ConfigMap // records the last ConfigMap passed to updateConfigMap
	k8sVersion       string
	getErr           error
	updateErr        error
	createErr        error
	k8sErr           error
}
// getCurrentConfigMap returns the configured ConfigMap and error.
func (s *stubStableClient) getCurrentConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error) {
	return s.configMap, s.getErr
}

// updateConfigMap records the given ConfigMap and returns the configured error.
func (s *stubStableClient) updateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
	s.updatedConfigMap = configMap
	return nil, s.updateErr
}

// createConfigMap stores the given ConfigMap and returns it with the configured error.
func (s *stubStableClient) createConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
	s.configMap = configMap
	return s.configMap, s.createErr
}

// kubernetesVersion returns the configured version string and error.
func (s *stubStableClient) kubernetesVersion() (string, error) {
	return s.k8sVersion, s.k8sErr
}

View File

@ -525,7 +525,7 @@ func (m *stubMerger) kubeconfigEnvVar() string {
func defaultConfigWithExpectedMeasurements(t *testing.T, conf *config.Config, csp cloudprovider.Provider) *config.Config {
t.Helper()
conf.Image = constants.VersionInfo
conf.Image = "v0.0.0"
conf.Name = "kubernetes"
switch csp {

View File

@ -14,8 +14,8 @@ import (
func NewUpgradeCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "upgrade",
Short: "Find and execute upgrades to your Constellation cluster",
Long: "Find and execute upgrades to your Constellation cluster.",
Short: "Find and apply upgrades to your Constellation cluster",
Long: "Find and apply upgrades to your Constellation cluster.",
Args: cobra.ExactArgs(0),
}

View File

@ -18,13 +18,16 @@ import (
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/versions"
"github.com/edgelesssys/constellation/v2/internal/versions/components"
"github.com/edgelesssys/constellation/v2/internal/versionsapi"
"github.com/spf13/afero"
"github.com/spf13/cobra"
)
func newUpgradeApplyCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "execute",
Use: "apply",
Short: "Apply an upgrade to a Constellation cluster",
Long: "Apply an upgrade to a Constellation cluster by applying the chosen configuration.",
Args: cobra.NoArgs,
@ -56,10 +59,16 @@ func runUpgradeApply(cmd *cobra.Command, args []string) error {
return err
}
return upgradeApply(cmd, imageFetcher, upgrader, fileHandler)
applyCmd := upgradeApplyCmd{upgrader: upgrader, log: log}
return applyCmd.upgradeApply(cmd, imageFetcher, fileHandler)
}
func upgradeApply(cmd *cobra.Command, imageFetcher imageFetcher, upgrader cloudUpgrader, fileHandler file.Handler) error {
type upgradeApplyCmd struct {
upgrader cloudUpgrader
log debugLog
}
func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, imageFetcher imageFetcher, fileHandler file.Handler) error {
flags, err := parseUpgradeApplyFlags(cmd)
if err != nil {
return fmt.Errorf("parsing flags: %w", err)
@ -69,18 +78,45 @@ func upgradeApply(cmd *cobra.Command, imageFetcher imageFetcher, upgrader cloudU
return config.DisplayValidationErrors(cmd.ErrOrStderr(), err)
}
if err := handleServiceUpgrade(cmd, upgrader, conf, flags); err != nil {
return err
if err := u.handleServiceUpgrade(cmd, conf, flags); err != nil {
return fmt.Errorf("service upgrade: %w", err)
}
// TODO: validate upgrade config? Should be basic things like checking image is not an empty string
// More sophisticated validation, like making sure we don't downgrade the cluster, should be done by `constellation upgrade plan`
invalidUpgradeErr := &cloudcmd.InvalidUpgradeError{}
err = u.handleK8sUpgrade(cmd.Context(), conf)
skipCtr := 0
switch {
case errors.Is(err, cloudcmd.ErrInProgress):
skipCtr = skipCtr + 1
cmd.PrintErrln("Skipping Kubernetes components upgrades. Another Kubernetes components upgrade is in progress")
case errors.As(err, &invalidUpgradeErr):
skipCtr = skipCtr + 1
cmd.PrintErrf("Skipping Kubernetes components upgrades: %s\n", err)
case err != nil:
return fmt.Errorf("upgrading Kubernetes components: %w", err)
}
return handleImageUpgrade(cmd.Context(), conf, imageFetcher, upgrader)
err = u.handleImageUpgrade(cmd.Context(), conf, imageFetcher)
switch {
case errors.Is(err, cloudcmd.ErrInProgress):
skipCtr = skipCtr + 1
cmd.PrintErrln("Skipping image upgrades. Another image upgrade is in progress")
case errors.As(err, &invalidUpgradeErr):
skipCtr = skipCtr + 1
cmd.PrintErrf("Skipping image upgrades: %s\n", err)
case err != nil:
return fmt.Errorf("upgrading image: %w", err)
}
if skipCtr < 2 {
fmt.Printf("Nodes will restart automatically\n")
}
return nil
}
func handleServiceUpgrade(cmd *cobra.Command, upgrader cloudUpgrader, conf *config.Config, flags upgradeApplyFlags) error {
err := upgrader.UpgradeHelmServices(cmd.Context(), conf, flags.upgradeTimeout, helm.DenyDestructive)
func (u *upgradeApplyCmd) handleServiceUpgrade(cmd *cobra.Command, conf *config.Config, flags upgradeApplyFlags) error {
err := u.upgrader.UpgradeHelmServices(cmd.Context(), conf, flags.upgradeTimeout, helm.DenyDestructive)
if errors.Is(err, helm.ErrConfirmationMissing) {
if !flags.yes {
cmd.PrintErrln("WARNING: Upgrading cert-manager will destroy all custom resources you have manually created that are based on the current version of cert-manager.")
@ -93,7 +129,7 @@ func handleServiceUpgrade(cmd *cobra.Command, upgrader cloudUpgrader, conf *conf
return nil
}
}
err = upgrader.UpgradeHelmServices(cmd.Context(), conf, flags.upgradeTimeout, helm.AllowDestructive)
err = u.upgrader.UpgradeHelmServices(cmd.Context(), conf, flags.upgradeTimeout, helm.AllowDestructive)
}
if err != nil {
return fmt.Errorf("upgrading helm: %w", err)
@ -102,15 +138,38 @@ func handleServiceUpgrade(cmd *cobra.Command, upgrader cloudUpgrader, conf *conf
return nil
}
func handleImageUpgrade(ctx context.Context, conf *config.Config, imageFetcher imageFetcher, upgrader cloudUpgrader) error {
// this config modification is temporary until we can remove the upgrade section from the config
conf.Image = conf.Upgrade.Image
func (u *upgradeApplyCmd) handleImageUpgrade(ctx context.Context, conf *config.Config, imageFetcher imageFetcher) error {
imageReference, err := imageFetcher.FetchReference(ctx, conf)
if err != nil {
return err
return fmt.Errorf("fetching image reference: %w", err)
}
return upgrader.UpgradeImage(ctx, imageReference, conf.Upgrade.Image, conf.Upgrade.Measurements)
imageVersion, err := versionsapi.NewVersionFromShortPath(conf.Image, versionsapi.VersionKindImage)
if err != nil {
return fmt.Errorf("parsing version from image short path: %w", err)
}
err = u.upgrader.UpgradeImage(ctx, imageReference, imageVersion.Version, conf.Upgrade.Measurements)
if err != nil {
return fmt.Errorf("upgrading image: %w", err)
}
return nil
}
func (u *upgradeApplyCmd) handleK8sUpgrade(ctx context.Context, conf *config.Config) error {
currentVersion, err := versions.NewValidK8sVersion(conf.KubernetesVersion)
if err != nil {
return fmt.Errorf("getting Kubernetes version: %w", err)
}
versionConfig := versions.VersionConfigs[currentVersion]
err = u.upgrader.UpgradeK8s(ctx, versionConfig.ClusterVersion, versionConfig.KubernetesComponents)
if err != nil {
return fmt.Errorf("upgrading Kubernetes: %w", err)
}
return nil
}
func parseUpgradeApplyFlags(cmd *cobra.Command) (upgradeApplyFlags, error) {
@ -147,6 +206,7 @@ type upgradeApplyFlags struct {
type cloudUpgrader interface {
UpgradeImage(ctx context.Context, imageReference, imageVersion string, measurements measurements.M) error
UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error
UpgradeK8s(ctx context.Context, clusterVersion string, components components.Components) error
}
type imageFetcher interface {

View File

@ -17,6 +17,8 @@ import (
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/versions/components"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -40,7 +42,7 @@ func TestUpgradeApply(t *testing.T) {
wantErr: true,
},
"upgrade error": {
upgrader: stubUpgrader{err: errors.New("error")},
upgrader: stubUpgrader{imageErr: errors.New("error")},
wantErr: true,
},
}
@ -57,7 +59,8 @@ func TestUpgradeApply(t *testing.T) {
cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), cloudprovider.Azure)
require.NoError(handler.WriteYAML(constants.ConfigFilename, cfg))
err := upgradeApply(cmd, &tc.imageFetcher, tc.upgrader, handler)
upgrader := upgradeApplyCmd{upgrader: tc.upgrader, log: logger.NewTest(t)}
err := upgrader.upgradeApply(cmd, &tc.imageFetcher, handler)
if tc.wantErr {
assert.Error(err)
} else {
@ -68,18 +71,23 @@ func TestUpgradeApply(t *testing.T) {
}
type stubUpgrader struct {
err error
helmErr error
imageErr error
helmErr error
k8sErr error
}
func (u stubUpgrader) UpgradeImage(context.Context, string, string, measurements.M) error {
return u.err
return u.imageErr
}
func (u stubUpgrader) UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error {
return u.helmErr
}
func (u stubUpgrader) UpgradeK8s(ctx context.Context, clusterVersion string, components components.Components) error {
return u.k8sErr
}
type stubImageFetcher struct {
reference string
fetchReferenceErr error

View File

@ -32,8 +32,6 @@ import (
"github.com/spf13/afero"
"github.com/spf13/cobra"
"golang.org/x/mod/semver"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
func newUpgradeCheckCmd() *cobra.Command {
@ -438,7 +436,7 @@ func (v *versionUpgrade) writeConfig(conf *config.Config, fileHandler file.Handl
// getCurrentImageVersion retrieves the semantic version of the image currently installed in the cluster.
// If the cluster is not using a release image, an error is returned.
func getCurrentImageVersion(ctx context.Context, checker upgradeChecker) (string, error) {
_, imageVersion, err := checker.GetCurrentImage(ctx)
imageVersion, err := checker.CurrentImage(ctx)
if err != nil {
return "", err
}
@ -452,7 +450,7 @@ func getCurrentImageVersion(ctx context.Context, checker upgradeChecker) (string
// getCurrentKubernetesVersion retrieves the semantic version of Kubernetes currently installed in the cluster.
func getCurrentKubernetesVersion(ctx context.Context, checker upgradeChecker) (string, error) {
_, k8sVersion, err := checker.GetCurrentKubernetesVersion(ctx)
k8sVersion, err := checker.CurrentKubernetesVersion(ctx)
if err != nil {
return "", err
}
@ -527,8 +525,8 @@ type upgradeCheckFlags struct {
}
type upgradeChecker interface {
GetCurrentImage(ctx context.Context) (*unstructured.Unstructured, string, error)
GetCurrentKubernetesVersion(ctx context.Context) (*unstructured.Unstructured, string, error)
CurrentImage(ctx context.Context) (string, error)
CurrentKubernetesVersion(ctx context.Context) (string, error)
}
type versionListFetcher interface {

View File

@ -26,7 +26,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/mod/semver"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// TestBuildString checks that the resulting user output is as expected. Slow part is the Sscanf in parseCanonicalSemver().
@ -295,10 +294,10 @@ type stubUpgradeChecker struct {
err error
}
func (u stubUpgradeChecker) GetCurrentImage(context.Context) (*unstructured.Unstructured, string, error) {
return nil, u.image, u.err
func (u stubUpgradeChecker) CurrentImage(context.Context) (string, error) {
return u.image, u.err
}
func (u stubUpgradeChecker) GetCurrentKubernetesVersion(ctx context.Context) (*unstructured.Unstructured, string, error) {
return nil, u.k8sVersion, u.err
func (u stubUpgradeChecker) CurrentKubernetesVersion(ctx context.Context) (string, error) {
return u.k8sVersion, u.err
}

View File

@ -45,12 +45,12 @@ func (f *Fetcher) FetchReference(ctx context.Context, config *config.Config) (st
provider := config.GetProvider()
variant, err := variant(provider, config)
if err != nil {
return "", err
return "", fmt.Errorf("determining variant: %w", err)
}
ver, err := versionsapi.NewVersionFromShortPath(config.Image, versionsapi.VersionKindImage)
if err != nil {
return "", err
return "", fmt.Errorf("parsing config image short path: %w", err)
}
imgInfoReq := versionsapi.ImageInfo{

8
go.mod
View File

@ -30,7 +30,10 @@ replace (
k8s.io/sample-apiserver v0.0.0 => k8s.io/sample-apiserver v0.25.3
)
replace github.com/google/go-tpm-tools => github.com/daniel-weisse/go-tpm-tools v0.0.0-20230105122812-f7474d459dfc
replace (
github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api => ./operators/constellation-node-operator/api
github.com/google/go-tpm-tools => github.com/daniel-weisse/go-tpm-tools v0.0.0-20230105122812-f7474d459dfc
)
require (
cloud.google.com/go/compute v1.15.1
@ -59,6 +62,7 @@ require (
github.com/aws/smithy-go v1.13.5
github.com/coreos/go-systemd/v22 v22.5.0
github.com/docker/docker v20.10.22+incompatible
github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api v0.0.0
github.com/fsnotify/fsnotify v1.6.0
github.com/go-playground/locales v0.14.1
github.com/go-playground/universal-translator v0.18.0
@ -111,6 +115,8 @@ require (
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
)
require sigs.k8s.io/controller-runtime v0.13.1 // indirect
require (
cloud.google.com/go v0.107.0 // indirect
cloud.google.com/go/iam v0.8.0 // indirect

4
go.sum
View File

@ -1059,6 +1059,7 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
@ -2045,6 +2046,7 @@ gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76
gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
@ -2114,6 +2116,8 @@ pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/controller-runtime v0.13.1 h1:tUsRCSJVM1QQOOeViGeX3GMT3dQF1eePPw6sEE3xSlg=
sigs.k8s.io/controller-runtime v0.13.1/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM=

View File

@ -30,7 +30,10 @@ replace (
k8s.io/sample-apiserver v0.0.0 => k8s.io/sample-apiserver v0.25.3
)
replace github.com/edgelesssys/constellation/v2 => ./..
replace (
github.com/edgelesssys/constellation/v2 => ./..
github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api => ./../operators/constellation-node-operator/api
)
require (
github.com/edgelesssys/constellation/v2 v2.0.0
@ -121,6 +124,7 @@ require (
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api v0.0.0 // indirect
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
@ -297,6 +301,7 @@ require (
k8s.io/kubectl v0.25.2 // indirect
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect
oras.land/oras-go v1.2.0 // indirect
sigs.k8s.io/controller-runtime v0.13.1 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/kustomize/api v0.12.1 // indirect
sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect

View File

@ -413,6 +413,7 @@ github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM=
github.com/fullstorydev/grpcurl v1.8.0/go.mod h1:Mn2jWbdMrQGJQ8UD62uNyMumT2acsZUCkZIqFxsQf1o=
github.com/fullstorydev/grpcurl v1.8.1/go.mod h1:3BWhvHZwNO7iLXaQlojdg5NA6SxUDePli4ecpK1N7gw=
@ -1050,6 +1051,7 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ=
github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
@ -2064,6 +2066,7 @@ gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76
gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
@ -2131,6 +2134,8 @@ pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/controller-runtime v0.13.1 h1:tUsRCSJVM1QQOOeViGeX3GMT3dQF1eePPw6sEE3xSlg=
sigs.k8s.io/controller-runtime v0.13.1/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM=

View File

@ -123,6 +123,8 @@ const (
K8sVersionFieldName = "cluster-version"
// ComponentsListKey is the name of the key holding the list of components in the components configMap.
ComponentsListKey = "components"
// NodeVersionResourceName resource name used for NodeVersion in constellation-operator and CLI.
NodeVersionResourceName = "constellation-version"
// NodeKubernetesComponentsAnnotationKey is the name of the annotation holding the reference to the ConfigMap listing all K8s components.
NodeKubernetesComponentsAnnotationKey = "constellation.edgeless.systems/kubernetes-components"

View File

@ -9,8 +9,6 @@ package constants
const (
// AutoscalingStrategyResourceName resource name used for AutoscalingStrategy.
AutoscalingStrategyResourceName = "autoscalingstrategy"
// NodeVersionResourceName resource name used for NodeVersion.
NodeVersionResourceName = "constellation-version"
// ControlPlaneScalingGroupResourceName resource name used for ControlPlaneScalingGroup.
ControlPlaneScalingGroupResourceName = "scalinggroup-controlplane"
// WorkerScalingGroupResourceName resource name used for WorkerScaling.

View File

@ -121,7 +121,7 @@ func createNodeVersion(ctx context.Context, k8sClient client.Client, imageRefere
err = k8sClient.Create(ctx, &updatev1alpha1.NodeVersion{
TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "NodeVersion"},
ObjectMeta: metav1.ObjectMeta{
Name: constants.NodeVersionResourceName,
Name: mainconstants.NodeVersionResourceName,
},
Spec: updatev1alpha1.NodeVersionSpec{
ImageReference: imageReference,
@ -176,7 +176,7 @@ func createScalingGroup(ctx context.Context, config newScalingGroupConfig) error
Name: strings.ToLower(config.groupName),
},
Spec: updatev1alpha1.ScalingGroupSpec{
NodeVersion: constants.NodeVersionResourceName,
NodeVersion: mainconstants.NodeVersionResourceName,
GroupID: config.groupID,
AutoscalerGroupName: config.autoscalingGroupName,
Min: 1,

View File

@ -196,7 +196,7 @@ func TestCreateNodeVersion(t *testing.T) {
wantNodeVersion: &updatev1alpha1.NodeVersion{
TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "NodeVersion"},
ObjectMeta: metav1.ObjectMeta{
Name: constants.NodeVersionResourceName,
Name: mainconstants.NodeVersionResourceName,
},
Spec: updatev1alpha1.NodeVersionSpec{
ImageReference: "image-reference",
@ -211,11 +211,11 @@ func TestCreateNodeVersion(t *testing.T) {
wantErr: true,
},
"version exists": {
createErr: k8sErrors.NewAlreadyExists(schema.GroupResource{}, constants.NodeVersionResourceName),
createErr: k8sErrors.NewAlreadyExists(schema.GroupResource{}, mainconstants.NodeVersionResourceName),
existingNodeVersion: &updatev1alpha1.NodeVersion{
TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "NodeVersion"},
ObjectMeta: metav1.ObjectMeta{
Name: constants.NodeVersionResourceName,
Name: mainconstants.NodeVersionResourceName,
},
Spec: updatev1alpha1.NodeVersionSpec{
ImageReference: "image-reference2",
@ -227,7 +227,7 @@ func TestCreateNodeVersion(t *testing.T) {
wantNodeVersion: &updatev1alpha1.NodeVersion{
TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "NodeVersion"},
ObjectMeta: metav1.ObjectMeta{
Name: constants.NodeVersionResourceName,
Name: mainconstants.NodeVersionResourceName,
},
Spec: updatev1alpha1.NodeVersionSpec{
ImageReference: "image-reference2",
@ -286,7 +286,7 @@ func TestCreateScalingGroup(t *testing.T) {
Name: "group-name",
},
Spec: updatev1alpha1.ScalingGroupSpec{
NodeVersion: constants.NodeVersionResourceName,
NodeVersion: mainconstants.NodeVersionResourceName,
GroupID: "group-id",
AutoscalerGroupName: "group-Name",
Min: 1,
@ -307,7 +307,7 @@ func TestCreateScalingGroup(t *testing.T) {
Name: "group-name",
},
Spec: updatev1alpha1.ScalingGroupSpec{
NodeVersion: constants.NodeVersionResourceName,
NodeVersion: mainconstants.NodeVersionResourceName,
GroupID: "group-id",
AutoscalerGroupName: "group-Name",
Min: 1,