cli: fix misleading error while applying kubernetes-only upgrade (#1630)

* The check would previously fail if e.g. `apply` did not upgrade the
image, but a new image was specified in the config. This could
happen if the specified image was too new, but a valid Kubernetes
upgrade was specified. The post-apply check now compares the updated
NodeVersion against the object that was actually applied, instead of
against the versions requested in the config.
* ci: fix variable expansion in e2e-upgrade call
* e2e: do not verify measurement signature
Otto Bittner 2023-04-13 15:58:37 +02:00 committed by GitHub
parent ea86520f0b
commit d2967fff6b
6 changed files with 157 additions and 27 deletions

@@ -162,10 +162,10 @@ jobs:
           echo "Microservice target: $MICROSERVICES"
           if [[ -n ${MICROSERVICES} ]]; then
-            MICROSERVICES_FLAG="--target-microservices $MICROSERVICES"
+            MICROSERVICES_FLAG="--target-microservices=$MICROSERVICES"
           fi
           if [[ -n ${KUBERNETES} ]]; then
-            KUBERNETES_FLAG="--target-kubernetes $KUBERNETES"
+            KUBERNETES_FLAG="--target-kubernetes=$KUBERNETES"
           fi
           bazelisk run //e2e/internal/upgrade:upgrade_test -- --want-worker "$WORKERNODES" --want-control "$CONTROLNODES" --target-image "$IMAGE" "$KUBERNETES_FLAG" "$MICROSERVICES_FLAG"
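
Why the `=` form matters here: the workflow passes the composed string to the test binary as a single quoted argument ("$KUBERNETES_FLAG"), so the space-separated form arrives as one argv entry. A minimal sketch of the resulting parse behavior, assuming the e2e test consumes its flags via Go's standard flag package (the flag-set name below is made up for illustration):

package main

import (
	"flag"
	"fmt"
)

// parse mimics how a test binary would consume one argv slice.
func parse(args []string) {
	fs := flag.NewFlagSet("upgrade_test", flag.ContinueOnError)
	target := fs.String("target-kubernetes", "", "target Kubernetes version")
	if err := fs.Parse(args); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("target-kubernetes =", *target)
}

func main() {
	// The quoted expansion delivers ONE argv entry. With a space inside,
	// the parser sees a single unknown flag named "target-kubernetes v1.27.0".
	parse([]string{"--target-kubernetes v1.27.0"}) // parse error
	// The "=" form is self-delimiting and parses as expected.
	parse([]string{"--target-kubernetes=v1.27.0"}) // target-kubernetes = v1.27.0
}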

@@ -54,6 +54,17 @@ func GetConstellationVersion(ctx context.Context, client DynamicInterface) (upda
 	return nodeVersion, nil
 }
 
+// applyError presents an unexpected apply result. It wraps the expected and actual values for improved debuggability.
+type applyError struct {
+	expected string
+	actual   string
+}
+
+// Error returns the string representation of this error.
+func (e *applyError) Error() string {
+	return fmt.Sprintf("expected NodeVersion to contain %s, got %s", e.expected, e.actual)
+}
+
 // Upgrader handles upgrading the cluster's components using the CLI.
 type Upgrader struct {
 	stableInterface stableInterface
@@ -162,14 +173,14 @@ func (u *Upgrader) UpgradeNodeVersion(ctx context.Context, conf *config.Config)
 		return fmt.Errorf("applying upgrade: %w", err)
 	}
 	switch {
-	case updatedNodeVersion.Spec.ImageReference != imageReference:
-		return fmt.Errorf("expected NodeVersion to contain %s, got %s", imageReference, updatedNodeVersion.Spec.ImageReference)
-	case updatedNodeVersion.Spec.ImageVersion != imageVersion.Version:
-		return fmt.Errorf("expected NodeVersion to contain %s, got %s", imageVersion.Version, updatedNodeVersion.Spec.ImageVersion)
+	case updatedNodeVersion.Spec.ImageReference != nodeVersion.Spec.ImageReference:
+		return &applyError{expected: nodeVersion.Spec.ImageReference, actual: updatedNodeVersion.Spec.ImageReference}
+	case updatedNodeVersion.Spec.ImageVersion != nodeVersion.Spec.ImageVersion:
+		return &applyError{expected: nodeVersion.Spec.ImageVersion, actual: updatedNodeVersion.Spec.ImageVersion}
 	case updatedNodeVersion.Spec.KubernetesComponentsReference != nodeVersion.Spec.KubernetesComponentsReference:
-		return fmt.Errorf("expected NodeVersion to contain %s, got %s", nodeVersion.Spec.KubernetesComponentsReference, updatedNodeVersion.Spec.KubernetesComponentsReference)
-	case updatedNodeVersion.Spec.KubernetesClusterVersion != versionConfig.ClusterVersion:
-		return fmt.Errorf("expected NodeVersion to contain %s, got %s", versionConfig.ClusterVersion, updatedNodeVersion.Spec.KubernetesClusterVersion)
+		return &applyError{expected: nodeVersion.Spec.KubernetesComponentsReference, actual: updatedNodeVersion.Spec.KubernetesComponentsReference}
+	case updatedNodeVersion.Spec.KubernetesClusterVersion != nodeVersion.Spec.KubernetesClusterVersion:
+		return &applyError{expected: nodeVersion.Spec.KubernetesClusterVersion, actual: updatedNodeVersion.Spec.KubernetesClusterVersion}
 	}
 
 	return errors.Join(upgradeErrs...)
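
With a typed error in place, callers and tests can distinguish "apply went through but produced an unexpected object" from "the requested upgrade is invalid" via errors.As. A self-contained sketch with local stand-in types (the real applyError lives next to Upgrader, and InvalidUpgradeError in the internal compatibility package):

package main

import (
	"errors"
	"fmt"
)

// Stand-in types for illustration only.
type applyError struct{ expected, actual string }

func (e *applyError) Error() string {
	return fmt.Sprintf("expected NodeVersion to contain %s, got %s", e.expected, e.actual)
}

type invalidUpgradeError struct{ reason string }

func (e *invalidUpgradeError) Error() string { return "invalid upgrade: " + e.reason }

func main() {
	err := fmt.Errorf("upgrading NodeVersion: %w", &applyError{expected: "v1.2.3", actual: "v3.2.1"})

	var aErr *applyError
	var iErr *invalidUpgradeError
	switch {
	case errors.As(err, &aErr):
		fmt.Println("apply succeeded but the cluster object is off:", aErr)
	case errors.As(err, &iErr):
		fmt.Println("upgrade request was rejected up front:", iErr)
	}
}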

@@ -35,6 +35,8 @@ func TestUpgradeNodeVersion(t *testing.T) {
 		stable                *stubStableClient
 		conditions            []metav1.Condition
 		currentImageVersion   string
+		newImageReference     string
+		badImageVersion       string
 		currentClusterVersion string
 		conf                  *config.Config
 		getErr                error
@@ -75,7 +77,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
 			wantUpdate: true,
 			wantErr:    true,
 			assertCorrectError: func(t *testing.T, err error) bool {
-				upgradeErr := &compatibility.InvalidUpgradeError{}
+				var upgradeErr *compatibility.InvalidUpgradeError
 				return assert.ErrorAs(t, err, &upgradeErr)
 			},
 		},
@@ -96,7 +98,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
 			wantUpdate: true,
 			wantErr:    true,
 			assertCorrectError: func(t *testing.T, err error) bool {
-				upgradeErr := &compatibility.InvalidUpgradeError{}
+				var upgradeErr *compatibility.InvalidUpgradeError
 				return assert.ErrorAs(t, err, &upgradeErr)
 			},
 		},
@@ -112,7 +114,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
 			stable:  &stubStableClient{},
 			wantErr: true,
 			assertCorrectError: func(t *testing.T, err error) bool {
-				upgradeErr := &compatibility.InvalidUpgradeError{}
+				var upgradeErr *compatibility.InvalidUpgradeError
 				return assert.ErrorAs(t, err, &upgradeErr)
 			},
 		},
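
The repeated change from `upgradeErr := &compatibility.InvalidUpgradeError{}` to `var upgradeErr *compatibility.InvalidUpgradeError` is behavior-preserving: either way, assert.ErrorAs receives a `**InvalidUpgradeError` target. The `var` form is the idiom from the errors.As documentation and skips allocating a throwaway value. A minimal sketch with a stand-in type:

package main

import (
	"errors"
	"fmt"
)

type invalidUpgradeError struct{}

func (*invalidUpgradeError) Error() string { return "invalid upgrade" }

func main() {
	err := fmt.Errorf("wrapped: %w", &invalidUpgradeError{})

	// Zero-valued pointer target; errors.As fills it in on a match.
	// The old form pre-allocated &invalidUpgradeError{} only for errors.As
	// to overwrite the pointer anyway.
	var target *invalidUpgradeError
	fmt.Println(errors.As(err, &target), target != nil) // true true
}
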
@@ -147,6 +149,50 @@ func TestUpgradeNodeVersion(t *testing.T) {
 				return assert.ErrorIs(t, err, someErr)
 			},
 		},
+		"image too new valid k8s": {
+			conf: func() *config.Config {
+				conf := config.Default()
+				conf.Image = "v1.4.2"
+				conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
+				return conf
+			}(),
+			newImageReference:     "path/to/image:v1.4.2",
+			currentImageVersion:   "v1.2.2",
+			currentClusterVersion: versions.SupportedK8sVersions()[0],
+			stable: &stubStableClient{
+				configMaps: map[string]*corev1.ConfigMap{
+					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+				},
+			},
+			wantUpdate: true,
+			wantErr:    true,
+			assertCorrectError: func(t *testing.T, err error) bool {
+				upgradeErr := &compatibility.InvalidUpgradeError{}
+				return assert.ErrorAs(t, err, &upgradeErr)
+			},
+		},
+		"apply returns bad object": {
+			conf: func() *config.Config {
+				conf := config.Default()
+				conf.Image = "v1.2.3"
+				conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
+				return conf
+			}(),
+			currentImageVersion:   "v1.2.2",
+			currentClusterVersion: versions.SupportedK8sVersions()[0],
+			badImageVersion:       "v3.2.1",
+			stable: &stubStableClient{
+				configMaps: map[string]*corev1.ConfigMap{
+					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+				},
+			},
+			wantUpdate: true,
+			wantErr:    true,
+			assertCorrectError: func(t *testing.T, err error) bool {
+				var target *applyError
+				return assert.ErrorAs(t, err, &target)
+			},
+		},
 	}
 
 	for name, tc := range testCases {
@@ -167,11 +213,19 @@ func TestUpgradeNodeVersion(t *testing.T) {
 			unstrNodeVersion, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
 			require.NoError(err)
 
-			dynamicClient := &stubDynamicClient{object: &unstructured.Unstructured{Object: unstrNodeVersion}, getErr: tc.getErr}
+			var badUpdatedObject *unstructured.Unstructured
+			if tc.badImageVersion != "" {
+				nodeVersion.Spec.ImageVersion = tc.badImageVersion
+				unstrBadNodeVersion, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
+				require.NoError(err)
+				badUpdatedObject = &unstructured.Unstructured{Object: unstrBadNodeVersion}
+			}
+
+			dynamicClient := &stubDynamicClient{object: &unstructured.Unstructured{Object: unstrNodeVersion}, badUpdatedObject: badUpdatedObject, getErr: tc.getErr}
 			upgrader := Upgrader{
 				stableInterface:  tc.stable,
 				dynamicInterface: dynamicClient,
-				imageFetcher:     &stubImageFetcher{},
+				imageFetcher:     &stubImageFetcher{reference: tc.newImageReference},
 				log:              logger.NewTest(t),
 				outWriter:        io.Discard,
 			}
@@ -420,10 +474,11 @@ func newJoinConfigMap(data string) *corev1.ConfigMap {
 }
 
 type stubDynamicClient struct {
-	object        *unstructured.Unstructured
-	updatedObject *unstructured.Unstructured
-	getErr        error
-	updateErr     error
+	object           *unstructured.Unstructured
+	updatedObject    *unstructured.Unstructured
+	badUpdatedObject *unstructured.Unstructured
+	getErr           error
+	updateErr        error
 }
 
 func (u *stubDynamicClient) GetCurrent(_ context.Context, _ string) (*unstructured.Unstructured, error) {
@@ -432,6 +487,9 @@ func (u *stubDynamicClient) GetCurrent(_ context.Context, _ string) (*unstructur
 
 func (u *stubDynamicClient) Update(_ context.Context, updatedObject *unstructured.Unstructured) (*unstructured.Unstructured, error) {
 	u.updatedObject = updatedObject
+	if u.badUpdatedObject != nil {
+		return u.badUpdatedObject, u.updateErr
+	}
 	return u.updatedObject, u.updateErr
 }
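
badUpdatedObject lets the stub model an API server whose stored object diverges from what the client wrote, which is exactly the condition the new post-apply check and applyError guard against. A toy, self-contained version of the pattern, reduced to strings:

package main

import "fmt"

// stubClient records the last write but can be rigged to return a different
// object, like stubDynamicClient with badUpdatedObject set.
type stubClient struct {
	updated string
	bad     string
}

func (s *stubClient) Update(obj string) string {
	s.updated = obj
	if s.bad != "" {
		return s.bad // simulate a divergent applied state
	}
	return obj
}

func main() {
	c := &stubClient{bad: "v3.2.1"}
	got := c.Update("v1.2.3")
	// A post-apply check comparing request and response catches the divergence.
	fmt.Println(got == "v1.2.3") // false
}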

@@ -17,6 +17,7 @@ go_library(
         "//internal/logger",
         "//internal/versionsapi",
         "//internal/versionsapi/fetcher",
+        "@in_gopkg_yaml_v3//:yaml_v3",
         "@sh_helm_helm_v3//pkg/action",
         "@sh_helm_helm_v3//pkg/cli",
     ],

@@ -10,12 +10,17 @@ package upgrade
 
 import (
 	"context"
+	"encoding/json"
 	"errors"
+	"fmt"
+	"io"
 	"net/http"
+	"net/url"
+
+	"gopkg.in/yaml.v3"
 
 	"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
 	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
-	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/versionsapi"
 	"github.com/edgelesssys/constellation/v2/internal/versionsapi/fetcher"
 )
@@ -38,17 +43,14 @@ func fetchUpgradeInfo(ctx context.Context, csp cloudprovider.Provider, toImage s
 		return upgradeInfo{}, err
 	}
 
-	measurementsURL, signatureURL, err := versionsapi.MeasurementURL(ver, csp)
+	measurementsURL, _, err := versionsapi.MeasurementURL(ver, csp)
 	if err != nil {
 		return upgradeInfo{}, err
 	}
 
-	var fetchedMeasurements measurements.M
-	_, err = fetchedMeasurements.FetchAndVerify(
+	fetchedMeasurements, err := fetchMeasurements(
 		ctx, http.DefaultClient,
 		measurementsURL,
-		signatureURL,
-		[]byte(constants.CosignPublicKey),
 		measurements.WithMetadata{
 			CSP:   csp,
 			Image: toImage,
@@ -72,6 +74,56 @@ func fetchUpgradeInfo(ctx context.Context, csp cloudprovider.Provider, toImage s
 	return info, nil
 }
 
+// fetchMeasurements is essentially a copy of measurements.FetchAndVerify, but with verification removed.
+// This is necessary since the e2e tests may target release images whose measurements are signed with the release key.
+// It is easier to skip verification than to implement a second bazel target with the enterprise build tag set.
+func fetchMeasurements(ctx context.Context, client *http.Client, measurementsURL *url.URL, metadata measurements.WithMetadata) (measurements.M, error) {
+	measurementsRaw, err := getFromURL(ctx, client, measurementsURL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch measurements: %w", err)
+	}
+
+	var mWithMetadata measurements.WithMetadata
+	if err := json.Unmarshal(measurementsRaw, &mWithMetadata); err != nil {
+		if yamlErr := yaml.Unmarshal(measurementsRaw, &mWithMetadata); yamlErr != nil {
+			return nil, errors.Join(
+				err,
+				fmt.Errorf("trying yaml format: %w", yamlErr),
+			)
+		}
+	}
+
+	if mWithMetadata.CSP != metadata.CSP {
+		return nil, fmt.Errorf("invalid measurement metadata: CSP mismatch: expected %s, got %s", metadata.CSP, mWithMetadata.CSP)
+	}
+
+	if mWithMetadata.Image != metadata.Image {
+		return nil, fmt.Errorf("invalid measurement metadata: image mismatch: expected %s, got %s", metadata.Image, mWithMetadata.Image)
+	}
+
+	return mWithMetadata.Measurements, nil
+}
+
+func getFromURL(ctx context.Context, client *http.Client, sourceURL *url.URL) ([]byte, error) {
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, sourceURL.String(), http.NoBody)
+	if err != nil {
+		return []byte{}, err
+	}
+	resp, err := client.Do(req)
+	if err != nil {
+		return []byte{}, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return []byte{}, fmt.Errorf("http status code: %d", resp.StatusCode)
+	}
+	content, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return []byte{}, err
+	}
+	return content, nil
+}
+
 func fetchWantImage(ctx context.Context, client *fetcher.Fetcher, csp cloudprovider.Provider, imageInfo versionsapi.ImageInfo) (string, error) {
 	imageInfo, err := client.FetchImageInfo(ctx, imageInfo)
 	if err != nil {
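
The decode-with-fallback in fetchMeasurements (JSON first, then YAML, joining both errors) can be exercised in isolation. A sketch with a toy struct standing in for measurements.WithMetadata; the real type carries CSP, Image, and Measurements as used above, but the field tags and sample values here are illustrative only:

package main

import (
	"encoding/json"
	"errors"
	"fmt"

	"gopkg.in/yaml.v3"
)

type doc struct {
	CSP   string `json:"csp" yaml:"csp"`
	Image string `json:"image" yaml:"image"`
}

func decode(raw []byte) (doc, error) {
	var d doc
	if jsonErr := json.Unmarshal(raw, &d); jsonErr != nil {
		if yamlErr := yaml.Unmarshal(raw, &d); yamlErr != nil {
			// Report both attempts, mirroring the errors.Join call above.
			return doc{}, errors.Join(jsonErr, fmt.Errorf("trying yaml format: %w", yamlErr))
		}
	}
	return d, nil
}

func main() {
	d, err := decode([]byte("csp: GCP\nimage: v2.7.0")) // YAML input: JSON fails, YAML succeeds
	fmt.Println(d, err)
	d, err = decode([]byte(`{"csp":"GCP","image":"v2.7.0"}`)) // JSON input parses directly
	fmt.Println(d, err)
}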

@@ -107,6 +107,14 @@ func TestUpgrade(t *testing.T) {
 	require.NoError(containsUnexepectedMsg(string(msg)))
 	log.Println(string(msg))
 
+	// Show versions set in cluster.
+	// The string after "Cluster status:" in the output might not be updated yet.
+	// This is only updated after the operator finishes one reconcile loop.
+	cmd = exec.CommandContext(context.Background(), cli, "status")
+	msg, err = cmd.CombinedOutput()
+	require.NoError(err, string(msg))
+	log.Println(string(msg))
+
 	testMicroservicesEventuallyHaveVersion(t, targetVersions.microservices, *timeout)
 	testNodesEventuallyHaveVersion(t, k, targetVersions, *wantControl+*wantWorker, *timeout)
 }
@@ -196,7 +204,7 @@ func testMicroservicesEventuallyHaveVersion(t *testing.T, wantMicroserviceVersio
 		}
 
 		if version != wantMicroserviceVersion {
-			log.Printf("Microservices still at version: %v\n", version)
+			log.Printf("Microservices still at version %v, want %v\n", version, wantMicroserviceVersion)
 			return false
 		}
@@ -227,12 +235,12 @@ func testNodesEventuallyHaveVersion(t *testing.T, k *kubernetes.Clientset, targe
 			kubeletVersion := node.Status.NodeInfo.KubeletVersion
 			if kubeletVersion != targetVersions.kubernetes.String() {
-				log.Printf("\t%s: K8s (Kubelet) %s\n", node.Name, kubeletVersion)
+				log.Printf("\t%s: K8s (Kubelet) %s, want %s\n", node.Name, kubeletVersion, targetVersions.kubernetes.String())
 				allUpdated = false
 			}
 
 			kubeProxyVersion := node.Status.NodeInfo.KubeProxyVersion
 			if kubeProxyVersion != targetVersions.kubernetes.String() {
-				log.Printf("\t%s: K8s (Proxy) %s\n", node.Name, kubeProxyVersion)
+				log.Printf("\t%s: K8s (Proxy) %s, want %s\n", node.Name, kubeProxyVersion, targetVersions.kubernetes.String())
 				allUpdated = false
 			}
 		}