Mirror of https://github.com/edgelesssys/constellation.git, synced 2024-12-25 15:39:37 -05:00
cli: fix misleading error while applying kubernetes-only upgrade (#1630)
* The check would previously fail if e.g. `apply` did not upgrade the image, but a new image was specified in the config. This could happen if the specified image was too new while a valid Kubernetes upgrade was specified.
* ci: fix variable expansion in e2e-upgrade call
* e2e: do not verify measurement signature
This commit is contained in:
Parent: ea86520f0b
Commit: d2967fff6b
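The first bullet above is the heart of the change: after `apply`, the CLI used to compare the updated NodeVersion against the image requested in the config, so a Kubernetes-only upgrade that deliberately kept the current image was reported as a failure. The new check compares against the NodeVersion that was actually applied. A self-contained sketch with plain strings (version numbers borrowed from the new test case; everything else is illustrative, not the repository's code):

```go
package main

import "fmt"

func main() {
	configImage := "v1.4.2"  // requested in the config, but too new, so the image upgrade is skipped
	appliedImage := "v1.2.2" // image in the NodeVersion the CLI actually applied (unchanged)
	updatedImage := "v1.2.2" // image reported back by the cluster after apply

	// Old check: compares against the config and reports a spurious failure.
	if updatedImage != configImage {
		fmt.Printf("old check: expected NodeVersion to contain %s, got %s (misleading)\n", configImage, updatedImage)
	}

	// New check: compares against what was applied and stays silent for a valid Kubernetes-only upgrade.
	if updatedImage != appliedImage {
		fmt.Printf("new check: expected NodeVersion to contain %s, got %s\n", appliedImage, updatedImage)
	}
}
```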
.github/workflows/e2e-upgrade.yml (4 changes)
@@ -162,10 +162,10 @@ jobs:
          echo "Microservice target: $MICROSERVICES"

          if [[ -n ${MICROSERVICES} ]]; then
-           MICROSERVICES_FLAG="--target-microservices $MICROSERVICES"
+           MICROSERVICES_FLAG="--target-microservices=$MICROSERVICES"
          fi
          if [[ -n ${KUBERNETES} ]]; then
-           KUBERNETES_FLAG="--target-kubernetes $KUBERNETES"
+           KUBERNETES_FLAG="--target-kubernetes=$KUBERNETES"
          fi

          bazelisk run //e2e/internal/upgrade:upgrade_test -- --want-worker "$WORKERNODES" --want-control "$CONTROLNODES" --target-image "$IMAGE" "$KUBERNETES_FLAG" "$MICROSERVICES_FLAG"
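Why the `=` form matters: the workflow stores the whole flag in a variable and later passes it quoted (`"$KUBERNETES_FLAG"`), so flag and value reach the test binary as a single argv element. Assuming the e2e test defines its flags with Go's standard flag package (an assumption, not shown in this diff), only the `--flag=value` form parses in that case; a space-separated pair inside one argument is rejected. A minimal, self-contained sketch:

```go
package main

import (
	"flag"
	"fmt"
)

// parse builds a throwaway FlagSet with a --target-kubernetes flag and reports
// whether the given argv slice parses. The flag name mirrors the workflow; the
// version value is illustrative.
func parse(args []string) error {
	fs := flag.NewFlagSet("upgrade_test", flag.ContinueOnError)
	fs.String("target-kubernetes", "", "Kubernetes version to upgrade to")
	return fs.Parse(args)
}

func main() {
	// One argv element containing a space: this is what a quoted
	// "--target-kubernetes $KUBERNETES" expansion produces. It fails to parse.
	fmt.Println(parse([]string{"--target-kubernetes v1.27.1"}))

	// One argv element using '=': this is what the fixed workflow produces. It parses.
	fmt.Println(parse([]string{"--target-kubernetes=v1.27.1"}))
}
```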
@@ -54,6 +54,17 @@ func GetConstellationVersion(ctx context.Context, client DynamicInterface) (upda
	return nodeVersion, nil
}

+// applyError denotes an error applying the new NodeVersion. It wraps the expected and actual values for improved debuggability.
+type applyError struct {
+	expected string
+	actual   string
+}
+
+// Error returns the string representation of this error.
+func (e *applyError) Error() string {
+	return fmt.Sprintf("expected NodeVersion to contain %s, got %s", e.expected, e.actual)
+}
+
// Upgrader handles upgrading the cluster's components using the CLI.
type Upgrader struct {
	stableInterface  stableInterface
@@ -162,14 +173,14 @@ func (u *Upgrader) UpgradeNodeVersion(ctx context.Context, conf *config.Config)
		return fmt.Errorf("applying upgrade: %w", err)
	}
	switch {
-	case updatedNodeVersion.Spec.ImageReference != imageReference:
-		return fmt.Errorf("expected NodeVersion to contain %s, got %s", imageReference, updatedNodeVersion.Spec.ImageReference)
-	case updatedNodeVersion.Spec.ImageVersion != imageVersion.Version:
-		return fmt.Errorf("expected NodeVersion to contain %s, got %s", imageVersion.Version, updatedNodeVersion.Spec.ImageVersion)
+	case updatedNodeVersion.Spec.ImageReference != nodeVersion.Spec.ImageReference:
+		return &applyError{expected: nodeVersion.Spec.ImageReference, actual: updatedNodeVersion.Spec.ImageReference}
+	case updatedNodeVersion.Spec.ImageVersion != nodeVersion.Spec.ImageVersion:
+		return &applyError{expected: nodeVersion.Spec.ImageVersion, actual: updatedNodeVersion.Spec.ImageVersion}
	case updatedNodeVersion.Spec.KubernetesComponentsReference != nodeVersion.Spec.KubernetesComponentsReference:
-		return fmt.Errorf("expected NodeVersion to contain %s, got %s", nodeVersion.Spec.KubernetesComponentsReference, updatedNodeVersion.Spec.KubernetesComponentsReference)
-	case updatedNodeVersion.Spec.KubernetesClusterVersion != versionConfig.ClusterVersion:
-		return fmt.Errorf("expected NodeVersion to contain %s, got %s", versionConfig.ClusterVersion, updatedNodeVersion.Spec.KubernetesClusterVersion)
+		return &applyError{expected: nodeVersion.Spec.KubernetesComponentsReference, actual: updatedNodeVersion.Spec.KubernetesComponentsReference}
+	case updatedNodeVersion.Spec.KubernetesClusterVersion != nodeVersion.Spec.KubernetesClusterVersion:
+		return &applyError{expected: nodeVersion.Spec.KubernetesClusterVersion, actual: updatedNodeVersion.Spec.KubernetesClusterVersion}
	}

	return errors.Join(upgradeErrs...)
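Swapping the `fmt.Errorf` returns for a dedicated `*applyError` makes the failure mode machine-checkable: callers and tests can separate "the cluster returned an object that does not match what was applied" from a `compatibility.InvalidUpgradeError` via `errors.As`, which the new "apply returns bad object" test case below relies on. A condensed, self-contained sketch of the pattern (plain strings instead of the NodeVersion spec, hypothetical helper name; not the repository's actual code):

```go
package main

import (
	"errors"
	"fmt"
)

// applyError mirrors the type added in the diff above: a typed error carrying
// the expected and actual values, detectable with errors.As even when wrapped.
type applyError struct {
	expected, actual string
}

func (e *applyError) Error() string {
	return fmt.Sprintf("expected NodeVersion to contain %s, got %s", e.expected, e.actual)
}

// checkApplied is a hypothetical stand-in for the post-apply comparison.
func checkApplied(applied, updated string) error {
	if updated != applied {
		return &applyError{expected: applied, actual: updated}
	}
	return nil
}

func main() {
	err := fmt.Errorf("upgrading NodeVersion: %w", checkApplied("v1.2.2", "v3.2.1"))

	var target *applyError
	if errors.As(err, &target) {
		fmt.Println("apply returned a bad object:", target)
	}
}
```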
@@ -35,6 +35,8 @@ func TestUpgradeNodeVersion(t *testing.T) {
		stable                *stubStableClient
		conditions            []metav1.Condition
		currentImageVersion   string
+		newImageReference     string
+		badImageVersion       string
		currentClusterVersion string
		conf                  *config.Config
		getErr                error
@@ -75,7 +77,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
			wantUpdate: true,
			wantErr:    true,
			assertCorrectError: func(t *testing.T, err error) bool {
-				upgradeErr := &compatibility.InvalidUpgradeError{}
+				var upgradeErr *compatibility.InvalidUpgradeError
				return assert.ErrorAs(t, err, &upgradeErr)
			},
		},
@@ -96,7 +98,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
			wantUpdate: true,
			wantErr:    true,
			assertCorrectError: func(t *testing.T, err error) bool {
-				upgradeErr := &compatibility.InvalidUpgradeError{}
+				var upgradeErr *compatibility.InvalidUpgradeError
				return assert.ErrorAs(t, err, &upgradeErr)
			},
		},
@@ -112,7 +114,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
			stable:  &stubStableClient{},
			wantErr: true,
			assertCorrectError: func(t *testing.T, err error) bool {
-				upgradeErr := &compatibility.InvalidUpgradeError{}
+				var upgradeErr *compatibility.InvalidUpgradeError
				return assert.ErrorAs(t, err, &upgradeErr)
			},
		},
@@ -147,6 +149,50 @@ func TestUpgradeNodeVersion(t *testing.T) {
				return assert.ErrorIs(t, err, someErr)
			},
		},
+		"image too new valid k8s": {
+			conf: func() *config.Config {
+				conf := config.Default()
+				conf.Image = "v1.4.2"
+				conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
+				return conf
+			}(),
+			newImageReference:     "path/to/image:v1.4.2",
+			currentImageVersion:   "v1.2.2",
+			currentClusterVersion: versions.SupportedK8sVersions()[0],
+			stable: &stubStableClient{
+				configMaps: map[string]*corev1.ConfigMap{
+					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+				},
+			},
+			wantUpdate: true,
+			wantErr:    true,
+			assertCorrectError: func(t *testing.T, err error) bool {
+				upgradeErr := &compatibility.InvalidUpgradeError{}
+				return assert.ErrorAs(t, err, &upgradeErr)
+			},
+		},
+		"apply returns bad object": {
+			conf: func() *config.Config {
+				conf := config.Default()
+				conf.Image = "v1.2.3"
+				conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
+				return conf
+			}(),
+			currentImageVersion:   "v1.2.2",
+			currentClusterVersion: versions.SupportedK8sVersions()[0],
+			badImageVersion:       "v3.2.1",
+			stable: &stubStableClient{
+				configMaps: map[string]*corev1.ConfigMap{
+					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
+				},
+			},
+			wantUpdate: true,
+			wantErr:    true,
+			assertCorrectError: func(t *testing.T, err error) bool {
+				var target *applyError
+				return assert.ErrorAs(t, err, &target)
+			},
+		},
	}

	for name, tc := range testCases {
@@ -167,11 +213,19 @@ func TestUpgradeNodeVersion(t *testing.T) {
			unstrNodeVersion, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
			require.NoError(err)

-			dynamicClient := &stubDynamicClient{object: &unstructured.Unstructured{Object: unstrNodeVersion}, getErr: tc.getErr}
+			var badUpdatedObject *unstructured.Unstructured
+			if tc.badImageVersion != "" {
+				nodeVersion.Spec.ImageVersion = tc.badImageVersion
+				unstrBadNodeVersion, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
+				require.NoError(err)
+				badUpdatedObject = &unstructured.Unstructured{Object: unstrBadNodeVersion}
+			}
+
+			dynamicClient := &stubDynamicClient{object: &unstructured.Unstructured{Object: unstrNodeVersion}, badUpdatedObject: badUpdatedObject, getErr: tc.getErr}
			upgrader := Upgrader{
				stableInterface:  tc.stable,
				dynamicInterface: dynamicClient,
-				imageFetcher:     &stubImageFetcher{},
+				imageFetcher:     &stubImageFetcher{reference: tc.newImageReference},
				log:              logger.NewTest(t),
				outWriter:        io.Discard,
			}
@@ -420,10 +474,11 @@ func newJoinConfigMap(data string) *corev1.ConfigMap {
}

type stubDynamicClient struct {
-	object        *unstructured.Unstructured
-	updatedObject *unstructured.Unstructured
-	getErr        error
-	updateErr     error
+	object           *unstructured.Unstructured
+	updatedObject    *unstructured.Unstructured
+	badUpdatedObject *unstructured.Unstructured
+	getErr           error
+	updateErr        error
}

func (u *stubDynamicClient) GetCurrent(_ context.Context, _ string) (*unstructured.Unstructured, error) {
@@ -432,6 +487,9 @@ func (u *stubDynamicClient) GetCurrent(_ context.Context, _ string) (*unstructur

func (u *stubDynamicClient) Update(_ context.Context, updatedObject *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	u.updatedObject = updatedObject
+	if u.badUpdatedObject != nil {
+		return u.badUpdatedObject, u.updateErr
+	}
	return u.updatedObject, u.updateErr
}

@@ -17,6 +17,7 @@ go_library(
        "//internal/logger",
        "//internal/versionsapi",
        "//internal/versionsapi/fetcher",
+        "@in_gopkg_yaml_v3//:yaml_v3",
        "@sh_helm_helm_v3//pkg/action",
        "@sh_helm_helm_v3//pkg/cli",
    ],
@@ -10,12 +10,17 @@ package upgrade

import (
	"context"
+	"encoding/json"
+	"errors"
	"fmt"
+	"io"
	"net/http"
+	"net/url"
+
+	"gopkg.in/yaml.v3"

	"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
-	"github.com/edgelesssys/constellation/v2/internal/constants"
	"github.com/edgelesssys/constellation/v2/internal/versionsapi"
	"github.com/edgelesssys/constellation/v2/internal/versionsapi/fetcher"
)
@@ -38,17 +43,14 @@ func fetchUpgradeInfo(ctx context.Context, csp cloudprovider.Provider, toImage s
		return upgradeInfo{}, err
	}

-	measurementsURL, signatureURL, err := versionsapi.MeasurementURL(ver, csp)
+	measurementsURL, _, err := versionsapi.MeasurementURL(ver, csp)
	if err != nil {
		return upgradeInfo{}, err
	}

-	var fetchedMeasurements measurements.M
-	_, err = fetchedMeasurements.FetchAndVerify(
+	fetchedMeasurements, err := fetchMeasurements(
		ctx, http.DefaultClient,
		measurementsURL,
-		signatureURL,
-		[]byte(constants.CosignPublicKey),
		measurements.WithMetadata{
			CSP:   csp,
			Image: toImage,
@@ -72,6 +74,56 @@ func fetchUpgradeInfo(ctx context.Context, csp cloudprovider.Provider, toImage s
	return info, nil
}
+
+// fetchMeasurements is essentially a copy of measurements.FetchAndVerify, but with verification removed.
+// This is necessary since the e2e tests may target release images for which the measurements are signed with the release public key.
+// It is easier to skip verification than to implement a second bazel target with the enterprise build tag set.
+func fetchMeasurements(ctx context.Context, client *http.Client, measurementsURL *url.URL, metadata measurements.WithMetadata) (measurements.M, error) {
+	measurementsRaw, err := getFromURL(ctx, client, measurementsURL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch measurements: %w", err)
+	}
+
+	var mWithMetadata measurements.WithMetadata
+	if err := json.Unmarshal(measurementsRaw, &mWithMetadata); err != nil {
+		if yamlErr := yaml.Unmarshal(measurementsRaw, &mWithMetadata); yamlErr != nil {
+			return nil, errors.Join(
+				err,
+				fmt.Errorf("trying yaml format: %w", yamlErr),
+			)
+		}
+	}
+
+	if mWithMetadata.CSP != metadata.CSP {
+		return nil, fmt.Errorf("invalid measurement metadata: CSP mismatch: expected %s, got %s", metadata.CSP, mWithMetadata.CSP)
+	}
+	if mWithMetadata.Image != metadata.Image {
+		return nil, fmt.Errorf("invalid measurement metadata: image mismatch: expected %s, got %s", metadata.Image, mWithMetadata.Image)
+	}
+
+	return mWithMetadata.Measurements, nil
+}
+
+func getFromURL(ctx context.Context, client *http.Client, sourceURL *url.URL) ([]byte, error) {
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, sourceURL.String(), http.NoBody)
+	if err != nil {
+		return []byte{}, err
+	}
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return []byte{}, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return []byte{}, fmt.Errorf("http status code: %d", resp.StatusCode)
+	}
+	content, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return []byte{}, err
+	}
+	return content, nil
+}

func fetchWantImage(ctx context.Context, client *fetcher.Fetcher, csp cloudprovider.Provider, imageInfo versionsapi.ImageInfo) (string, error) {
	imageInfo, err := client.FetchImageInfo(ctx, imageInfo)
	if err != nil {
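Aside from dropping signature verification, `fetchMeasurements` keeps FetchAndVerify's lenient decoding: try JSON first, fall back to YAML, and join both errors if neither format fits. A self-contained sketch of that decode strategy (the struct is a stand-in for `measurements.WithMetadata`, and the values are illustrative):

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"

	"gopkg.in/yaml.v3"
)

// metadata is a simplified stand-in for the measurements-with-metadata document.
type metadata struct {
	CSP   string `json:"csp" yaml:"csp"`
	Image string `json:"image" yaml:"image"`
}

// decode tries JSON first and falls back to YAML, surfacing both errors if
// neither format matches.
func decode(raw []byte) (metadata, error) {
	var m metadata
	if err := json.Unmarshal(raw, &m); err != nil {
		if yamlErr := yaml.Unmarshal(raw, &m); yamlErr != nil {
			return metadata{}, errors.Join(err, fmt.Errorf("trying yaml format: %w", yamlErr))
		}
	}
	return m, nil
}

func main() {
	// YAML input: the JSON attempt fails, the YAML fallback succeeds.
	m, err := decode([]byte("csp: aws\nimage: v1.4.2\n"))
	fmt.Println(m, err)
}
```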
@@ -107,6 +107,14 @@ func TestUpgrade(t *testing.T) {
	require.NoError(containsUnexepectedMsg(string(msg)))
	log.Println(string(msg))
+
+	// Show versions set in cluster.
+	// The string after "Cluster status:" in the output might not be updated yet.
+	// This is only updated after the operator finishes one reconcile loop.
+	cmd = exec.CommandContext(context.Background(), cli, "status")
+	msg, err = cmd.CombinedOutput()
+	require.NoError(err, string(msg))
+	log.Println(string(msg))

	testMicroservicesEventuallyHaveVersion(t, targetVersions.microservices, *timeout)
	testNodesEventuallyHaveVersion(t, k, targetVersions, *wantControl+*wantWorker, *timeout)
}
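The added note about the "Cluster status:" string lagging a reconcile loop is also why the helpers called at the end poll until a timeout instead of asserting once. A self-contained sketch of that poll-until-timeout pattern (hypothetical helper, not the test's actual code):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// eventually polls check until it returns true or the timeout elapses.
func eventually(check func() bool, timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if check() {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("condition not met before timeout")
}

func main() {
	attempts := 0
	err := eventually(func() bool {
		attempts++
		return attempts >= 3 // pretend the operator needs a few reconcile loops
	}, time.Second, 10*time.Millisecond)
	fmt.Println("nodes reached target version:", err == nil)
}
```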
@@ -196,7 +204,7 @@ func testMicroservicesEventuallyHaveVersion(t *testing.T, wantMicroserviceVersio
		}

		if version != wantMicroserviceVersion {
-			log.Printf("Microservices still at version: %v\n", version)
+			log.Printf("Microservices still at version %v, want %v\n", version, wantMicroserviceVersion)
			return false
		}

@@ -227,12 +235,12 @@ func testNodesEventuallyHaveVersion(t *testing.T, k *kubernetes.Clientset, targe

		kubeletVersion := node.Status.NodeInfo.KubeletVersion
		if kubeletVersion != targetVersions.kubernetes.String() {
-			log.Printf("\t%s: K8s (Kubelet) %s\n", node.Name, kubeletVersion)
+			log.Printf("\t%s: K8s (Kubelet) %s, want %s\n", node.Name, kubeletVersion, targetVersions.kubernetes.String())
			allUpdated = false
		}
		kubeProxyVersion := node.Status.NodeInfo.KubeProxyVersion
		if kubeProxyVersion != targetVersions.kubernetes.String() {
-			log.Printf("\t%s: K8s (Proxy) %s\n", node.Name, kubeProxyVersion)
+			log.Printf("\t%s: K8s (Proxy) %s, want %s\n", node.Name, kubeProxyVersion, targetVersions.kubernetes.String())
			allUpdated = false
		}
	}