mirror of https://github.com/edgelesssys/constellation.git
synced 2025-12-15 16:09:39 -05:00
cli: refactor kubernetes package (#2232)
* Clean up CLI kubernetes package
* Rename CLI kubernetes pkg to kubecmd
* Unify kubernetes clients
* Refactor attestation config upgrade
* Update CODEOWNERS file
* Remove outdated GetMeasurementSalt

---------

Signed-off-by: Daniel Weiße <dw@edgeless.systems>
This commit is contained in:
parent
3bf316e28f
commit
afa7fd0edb
24 changed files with 1024 additions and 1160 deletions
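The core of this refactor is that CLI commands stop assembling their own set of Kubernetes clients (a kubectl wrapper for nodes, a dynamic client for the NodeVersion CRD, a "stable" client for ConfigMaps) and instead depend on a single kubecmd client behind small, command-local interfaces. A minimal, self-contained sketch of that pattern follows; the names are illustrative, not Constellation code:

package main

import (
	"context"
	"fmt"
)

// statusClient is the narrow view the status command needs; a concrete
// client satisfies it implicitly, and tests can swap in a stub.
type statusClient interface {
	ClusterStatus(ctx context.Context) (map[string]string, error)
}

type stubClient struct{ status map[string]string }

func (s stubClient) ClusterStatus(context.Context) (map[string]string, error) {
	return s.status, nil
}

func printStatus(ctx context.Context, c statusClient) error {
	st, err := c.ClusterStatus(ctx)
	if err != nil {
		return fmt.Errorf("getting cluster status: %w", err)
	}
	for node, state := range st {
		fmt.Printf("%s: %s\n", node, state)
	}
	return nil
}

func main() {
	_ = printStatus(context.Background(), stubClient{status: map[string]string{"node1": "Ready"}})
}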
@@ -45,7 +45,7 @@ go_library(
         "//cli/internal/cmd/pathprefix",
         "//cli/internal/featureset",
         "//cli/internal/helm",
-        "//cli/internal/kubernetes",
+        "//cli/internal/kubecmd",
         "//cli/internal/libvirt",
         "//cli/internal/terraform",
         "//cli/internal/upgrade",
@@ -78,7 +78,6 @@ go_library(
         "//internal/sigstore",
         "//internal/sigstore/keyselect",
         "//internal/versions",
-        "//operators/constellation-node-operator/api/v1alpha1",
         "//verify/verifyproto",
         "@com_github_golang_jwt_jwt_v5//:jwt",
         "@com_github_google_go_sev_guest//abi",
@@ -91,9 +90,7 @@ go_library(
         "@com_github_spf13_afero//:afero",
         "@com_github_spf13_cobra//:cobra",
         "@in_gopkg_yaml_v3//:yaml_v3",
         "@io_k8s_api//core/v1:core",
         "@io_k8s_apimachinery//pkg/runtime",
-        "@io_k8s_client_go//dynamic",
-        "@io_k8s_client_go//tools/clientcmd",
         "@io_k8s_client_go//tools/clientcmd/api/latest",
         "@io_k8s_sigs_yaml//:yaml",
@@ -140,7 +137,7 @@ go_test(
         "//cli/internal/clusterid",
         "//cli/internal/cmd/pathprefix",
         "//cli/internal/helm",
-        "//cli/internal/kubernetes",
+        "//cli/internal/kubecmd",
         "//cli/internal/terraform",
         "//cli/internal/upgrade",
         "//disk-mapper/recoverproto",
@@ -172,8 +169,6 @@ go_test(
         "@com_github_stretchr_testify//require",
         "@io_k8s_api//core/v1:core",
         "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
-        "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured",
-        "@io_k8s_apimachinery//pkg/runtime",
         "@org_golang_google_grpc//:go_default_library",
         "@org_golang_google_grpc//codes",
         "@org_golang_google_grpc//status",

@@ -13,20 +13,16 @@ import (
     "strings"
 
     "github.com/edgelesssys/constellation/v2/cli/internal/helm"
-    "github.com/edgelesssys/constellation/v2/cli/internal/kubernetes"
+    "github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
     "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
     "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
     "github.com/edgelesssys/constellation/v2/internal/config"
     "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/edgelesssys/constellation/v2/internal/file"
     "github.com/edgelesssys/constellation/v2/internal/kubernetes/kubectl"
-    "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
     "github.com/spf13/afero"
     "github.com/spf13/cobra"
     "gopkg.in/yaml.v3"
-    corev1 "k8s.io/api/core/v1"
-    "k8s.io/client-go/dynamic"
-    "k8s.io/client-go/tools/clientcmd"
 )
 
 // NewStatusCmd returns a new cobra.Command for the status command.
@@ -50,38 +46,17 @@ func runStatus(cmd *cobra.Command, _ []string) error {
     }
     defer log.Sync()
 
-    kubeClient := kubectl.New()
-
     flags, err := parseStatusFlags(cmd)
     if err != nil {
         return fmt.Errorf("parsing flags: %w", err)
     }
 
     fileHandler := file.NewHandler(afero.NewOsFs())
-    kubeConfig, err := fileHandler.Read(constants.AdminConfFilename)
-    if err != nil {
-        return fmt.Errorf("reading admin.conf: %w", err)
-    }
-
-    // need kubectl client to fetch nodes.
-    if err := kubeClient.Initialize(kubeConfig); err != nil {
-        return err
-    }
-
-    restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeConfig)
-    if err != nil {
-        return fmt.Errorf("creating k8s client config from kubeconfig: %w", err)
-    }
-    // need unstructured client to fetch NodeVersion CRD.
-    unstructuredClient, err := dynamic.NewForConfig(restConfig)
-    if err != nil {
-        return fmt.Errorf("setting up custom resource client: %w", err)
-    }
 
     // need helm client to fetch service versions.
     // The client used here doesn't need to know the current workspace.
     // It may be refactored in the future for easier usage.
-    helmClient, err := helm.NewUpgradeClient(kubectl.New(), constants.UpgradeDir, constants.AdminConfFilename, constants.HelmNamespace, log)
+    helmClient, err := helm.NewUpgradeClient(kubectl.NewUninitialized(), constants.UpgradeDir, constants.AdminConfFilename, constants.HelmNamespace, log)
     if err != nil {
         return fmt.Errorf("setting up helm client: %w", err)
     }
@@ -97,11 +72,12 @@ func runStatus(cmd *cobra.Command, _ []string) error {
     }
     variant := conf.GetAttestationConfig().GetVariant()
 
-    stableClient, err := kubernetes.NewStableClient(constants.AdminConfFilename)
+    kubeClient, err := kubecmd.New(cmd.OutOrStdout(), constants.AdminConfFilename, log)
     if err != nil {
-        return fmt.Errorf("setting up stable client: %w", err)
+        return fmt.Errorf("setting up kubernetes client: %w", err)
     }
-    output, err := status(cmd.Context(), kubeClient, stableClient, helmVersionGetter, kubernetes.NewNodeVersionClient(unstructuredClient), variant)
+
+    output, err := status(cmd.Context(), helmVersionGetter, kubeClient, variant)
     if err != nil {
         return fmt.Errorf("getting status: %w", err)
     }
@@ -111,19 +87,14 @@ func runStatus(cmd *cobra.Command, _ []string) error {
 }
 
 // status queries the cluster for the relevant status information and returns the output string.
-func status(
-    ctx context.Context, kubeClient kubeClient, cmClient configMapClient, getHelmVersions func() (fmt.Stringer, error),
-    dynamicInterface kubernetes.DynamicInterface, attestVariant variant.Variant,
+func status(ctx context.Context, getHelmVersions func() (fmt.Stringer, error), kubeClient kubeCmd, attestVariant variant.Variant,
 ) (string, error) {
-    nodeVersion, err := kubernetes.GetConstellationVersion(ctx, dynamicInterface)
+    nodeVersion, err := kubeClient.GetConstellationVersion(ctx)
     if err != nil {
         return "", fmt.Errorf("getting constellation version: %w", err)
     }
-    if len(nodeVersion.Status.Conditions) != 1 {
-        return "", fmt.Errorf("expected exactly one condition, got %d", len(nodeVersion.Status.Conditions))
-    }
 
-    attestationConfig, err := getAttestationConfig(ctx, cmClient, attestVariant)
+    attestationConfig, err := kubeClient.GetClusterAttestationConfig(ctx, attestVariant)
     if err != nil {
         return "", fmt.Errorf("getting attestation config: %w", err)
     }
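Note that the new status() signature keeps the helm lookup as a plain function value (getHelmVersions func() (fmt.Stringer, error)) rather than yet another interface, which is what lets the tests further down pass stubGetVersions(versionsOutput) directly. A self-contained sketch of that technique, with hypothetical names:

package main

import "fmt"

type serviceVersions string

func (s serviceVersions) String() string { return string(s) }

// report depends on a function value instead of a client interface; any
// closure with the right signature works, including a test stub.
func report(getVersions func() (fmt.Stringer, error)) (string, error) {
	v, err := getVersions()
	if err != nil {
		return "", fmt.Errorf("getting service versions: %w", err)
	}
	return v.String(), nil
}

func main() {
	out, _ := report(func() (fmt.Stringer, error) {
		return serviceVersions("Service versions:\n\tcilium: v1.12.1"), nil
	})
	fmt.Println(out)
}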
@@ -132,51 +103,30 @@ func status(
         return "", fmt.Errorf("marshalling attestation config: %w", err)
     }
 
-    targetVersions, err := kubernetes.NewTargetVersions(nodeVersion)
-    if err != nil {
-        return "", fmt.Errorf("getting configured versions: %w", err)
-    }
-
     serviceVersions, err := getHelmVersions()
     if err != nil {
         return "", fmt.Errorf("getting service versions: %w", err)
     }
 
-    status, err := kubernetes.ClusterStatus(ctx, kubeClient)
+    status, err := kubeClient.ClusterStatus(ctx)
     if err != nil {
         return "", fmt.Errorf("getting cluster status: %w", err)
     }
 
-    return statusOutput(targetVersions, serviceVersions, status, nodeVersion, string(prettyYAML)), nil
-}
-
-func getAttestationConfig(ctx context.Context, cmClient configMapClient, attestVariant variant.Variant) (config.AttestationCfg, error) {
-    joinConfig, err := cmClient.GetConfigMap(ctx, constants.JoinConfigMap)
-    if err != nil {
-        return nil, fmt.Errorf("getting current config map: %w", err)
-    }
-    rawAttestationConfig, ok := joinConfig.Data[constants.AttestationConfigFilename]
-    if !ok {
-        return nil, fmt.Errorf("attestationConfig not found in %s", constants.JoinConfigMap)
-    }
-    attestationConfig, err := config.UnmarshalAttestationConfig([]byte(rawAttestationConfig), attestVariant)
-    if err != nil {
-        return nil, fmt.Errorf("unmarshalling attestation config: %w", err)
-    }
-    return attestationConfig, nil
+    return statusOutput(nodeVersion, serviceVersions, status, string(prettyYAML)), nil
 }
 
 // statusOutput creates the status cmd output string by formatting the received information.
 func statusOutput(
-    targetVersions kubernetes.TargetVersions, serviceVersions fmt.Stringer,
-    status map[string]kubernetes.NodeStatus, nodeVersion v1alpha1.NodeVersion, rawAttestationConfig string,
+    nodeVersion kubecmd.NodeVersion, serviceVersions fmt.Stringer,
+    status map[string]kubecmd.NodeStatus, rawAttestationConfig string,
 ) string {
     builder := strings.Builder{}
 
-    builder.WriteString(targetVersionsString(targetVersions))
+    builder.WriteString(targetVersionsString(nodeVersion))
     builder.WriteString(serviceVersions.String())
-    builder.WriteString(fmt.Sprintf("Cluster status: %s\n", nodeVersion.Status.Conditions[0].Message))
-    builder.WriteString(nodeStatusString(status, targetVersions))
+    builder.WriteString(fmt.Sprintf("Cluster status: %s\n", nodeVersion.ClusterStatus()))
+    builder.WriteString(nodeStatusString(status, nodeVersion))
     builder.WriteString(fmt.Sprintf("Attestation config:\n%s", indentEntireStringWithTab(rawAttestationConfig)))
     return builder.String()
 }
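The exactly-one-condition check that status() used to perform inline is gone because kubecmd.NewNodeVersion (used by the tests below) evidently validates the CRD once at construction time and then exposes plain accessors such as ImageVersion(), KubernetesVersion(), and ClusterStatus(). A simplified, self-contained sketch of that wrapper idea, with stand-in types:

package main

import (
	"errors"
	"fmt"
)

type condition struct{ Message string }

type rawNodeVersion struct {
	ImageVersion string
	K8sVersion   string
	Conditions   []condition
}

// nodeVersion is the validated wrapper: constructing it enforces the
// exactly-one-condition invariant, so accessors can never panic.
type nodeVersion struct{ raw rawNodeVersion }

func newNodeVersion(raw rawNodeVersion) (nodeVersion, error) {
	if len(raw.Conditions) != 1 {
		return nodeVersion{}, errors.New("expected exactly one condition")
	}
	return nodeVersion{raw: raw}, nil
}

func (n nodeVersion) ImageVersion() string      { return n.raw.ImageVersion }
func (n nodeVersion) KubernetesVersion() string { return n.raw.K8sVersion }
func (n nodeVersion) ClusterStatus() string     { return n.raw.Conditions[0].Message }

func main() {
	nv, err := newNodeVersion(rawNodeVersion{
		ImageVersion: "v1.1.0",
		K8sVersion:   "v1.2.3",
		Conditions:   []condition{{Message: "Node version of every node is up to date"}},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(nv.ImageVersion(), nv.KubernetesVersion(), nv.ClusterStatus())
}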
@@ -190,14 +140,14 @@ func indentEntireStringWithTab(input string) string {
 }
 
 // nodeStatusString creates the node status part of the output string.
-func nodeStatusString(status map[string]kubernetes.NodeStatus, targetVersions kubernetes.TargetVersions) string {
+func nodeStatusString(status map[string]kubecmd.NodeStatus, targetVersions kubecmd.NodeVersion) string {
     var upToDateImages int
     var upToDateK8s int
     for _, node := range status {
-        if node.KubeletVersion() == targetVersions.Kubernetes() {
+        if node.KubeletVersion() == targetVersions.KubernetesVersion() {
             upToDateK8s++
         }
-        if node.ImageVersion() == targetVersions.ImagePath() {
+        if node.ImageVersion() == targetVersions.ImageReference() {
             upToDateImages++
         }
     }
@@ -212,11 +162,11 @@ func nodeStatusString(status map[string]kubernetes.NodeStatus, targetVersions kubernetes.TargetVersions) string {
 }
 
 // targetVersionsString creates the target versions part of the output string.
-func targetVersionsString(target kubernetes.TargetVersions) string {
+func targetVersionsString(target kubecmd.NodeVersion) string {
     builder := strings.Builder{}
     builder.WriteString("Target versions:\n")
-    builder.WriteString(fmt.Sprintf("\tImage: %s\n", target.Image()))
-    builder.WriteString(fmt.Sprintf("\tKubernetes: %s\n", target.Kubernetes()))
+    builder.WriteString(fmt.Sprintf("\tImage: %s\n", target.ImageVersion()))
+    builder.WriteString(fmt.Sprintf("\tKubernetes: %s\n", target.KubernetesVersion()))
 
     return builder.String()
 }
@@ -241,10 +191,8 @@ func parseStatusFlags(cmd *cobra.Command) (statusFlags, error) {
     }, nil
 }
 
-type kubeClient interface {
-    GetNodes(ctx context.Context) ([]corev1.Node, error)
-}
-
-type configMapClient interface {
-    GetConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error)
+type kubeCmd interface {
+    ClusterStatus(ctx context.Context) (map[string]kubecmd.NodeStatus, error)
+    GetConstellationVersion(ctx context.Context) (kubecmd.NodeVersion, error)
+    GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error)
 }

@@ -11,14 +11,15 @@ import (
     "fmt"
     "testing"
 
+    "github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
+    "github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
     "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
+    "github.com/edgelesssys/constellation/v2/internal/config"
     updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-    "k8s.io/apimachinery/pkg/runtime"
 )
 
 const successOutput = targetVersions + versionsOutput + nodesUpToDateOutput + attestationConfigOutput
@@ -47,42 +48,6 @@ const versionsOutput = `Service versions:
 
 const attestationConfigOutput = `Attestation config:
 	measurements:
-	    0:
-	        expected: 737f767a12f54e70eecbc8684011323ae2fe2dd9f90785577969d7a2013e8c12
-	        warnOnly: true
-	    2:
-	        expected: 3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969
-	        warnOnly: true
-	    3:
-	        expected: 3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969
-	        warnOnly: true
-	    4:
-	        expected: 55f7616b2c51dd7603f491c1c266373fe5c1e25e06a851d2090960172b03b27f
-	        warnOnly: false
-	    6:
-	        expected: 3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969
-	        warnOnly: true
-	    7:
-	        expected: fb71e5e55cefba9e2b396d17604de0fe6e1841a76758856a120833e3ad1c40a3
-	        warnOnly: true
-	    8:
-	        expected: "0000000000000000000000000000000000000000000000000000000000000000"
-	        warnOnly: false
-	    9:
-	        expected: f7480d37929bef4b61c32823cb7b3771aea19f7510db2e1478719a1d88f9775d
-	        warnOnly: false
-	    11:
-	        expected: "0000000000000000000000000000000000000000000000000000000000000000"
-	        warnOnly: false
-	    12:
-	        expected: b8038d11eade4cfee5fd41da04bf64e58bab15c42bfe01801e4c0f61376ba010
-	        warnOnly: false
-	    13:
-	        expected: "0000000000000000000000000000000000000000000000000000000000000000"
-	        warnOnly: false
-	    14:
-	        expected: d7c4cc7ff7933022f013e03bdee875b91720b5b86cf1753cad830f95e791926f
-	        warnOnly: true
 	    15:
 	        expected: "0000000000000000000000000000000000000000000000000000000000000000"
 	        warnOnly: false
@@ -90,17 +55,22 @@ const attestationConfigOutput = `Attestation config:
 
 // TestStatus checks that the status function produces the correct strings.
 func TestStatus(t *testing.T) {
+    mustParseNodeVersion := func(nV updatev1alpha1.NodeVersion) kubecmd.NodeVersion {
+        nodeVersion, err := kubecmd.NewNodeVersion(nV)
+        require.NoError(t, err)
+        return nodeVersion
+    }
+
     testCases := map[string]struct {
         kubeClient     stubKubeClient
-        nodeVersion    updatev1alpha1.NodeVersion
-        dynamicErr     error
         attestVariant  variant.Variant
         expectedOutput string
         wantErr        bool
     }{
         "success": {
             kubeClient: stubKubeClient{
-                nodes: []corev1.Node{
-                    {
+                status: map[string]kubecmd.NodeStatus{
+                    "outdated": kubecmd.NewNodeStatus(corev1.Node{
                         ObjectMeta: metav1.ObjectMeta{
                             Name: "node1",
                             Annotations: map[string]string{
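The mustParseNodeVersion closure added here is the usual "must" fixture helper: build the value or fail the test on the spot, so the case table stays declarative. A generic version of the same idea (illustrative only, not part of the commit):

package kubecmd_test

import "testing"

// must unwraps a (value, error) pair inside a test, failing the test
// immediately if fixture construction errored.
func must[T any](t *testing.T, v T, err error) T {
	t.Helper()
	if err != nil {
		t.Fatalf("building fixture: %v", err)
	}
	return v
}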
@@ -112,29 +82,35 @@ func TestStatus(t *testing.T) {
                                 KubeletVersion: "v1.2.3",
                             },
                         },
-                    },
-                },
-            },
-            nodeVersion: updatev1alpha1.NodeVersion{
-                Spec: updatev1alpha1.NodeVersionSpec{
-                    ImageVersion:             "v1.1.0",
-                    ImageReference:           "v1.1.0",
-                    KubernetesClusterVersion: "v1.2.3",
-                },
-                Status: updatev1alpha1.NodeVersionStatus{
-                    Conditions: []metav1.Condition{
-                        {
-                            Message: "Node version of every node is up to date",
-                        },
-                    },
-                },
-            },
+                    }),
+                },
+                version: mustParseNodeVersion(updatev1alpha1.NodeVersion{
+                    Spec: updatev1alpha1.NodeVersionSpec{
+                        ImageVersion:             "v1.1.0",
+                        ImageReference:           "v1.1.0",
+                        KubernetesClusterVersion: "v1.2.3",
+                    },
+                    Status: updatev1alpha1.NodeVersionStatus{
+                        Conditions: []metav1.Condition{
+                            {
+                                Message: "Node version of every node is up to date",
+                            },
+                        },
+                    },
+                }),
+                attestation: &config.QEMUVTPM{
+                    Measurements: measurements.M{
+                        15: measurements.WithAllBytes(0, measurements.Enforce, measurements.PCRMeasurementLength),
+                    },
+                },
+            },
             attestVariant:  variant.QEMUVTPM{},
             expectedOutput: successOutput,
         },
         "one of two nodes not upgraded": {
             kubeClient: stubKubeClient{
-                nodes: []corev1.Node{
-                    {
+                status: map[string]kubecmd.NodeStatus{
+                    "outdated": kubecmd.NewNodeStatus(corev1.Node{
                         ObjectMeta: metav1.ObjectMeta{
                             Name: "outdated",
                             Annotations: map[string]string{
@@ -146,8 +122,8 @@ func TestStatus(t *testing.T) {
                                 KubeletVersion: "v1.2.2",
                             },
                         },
-                    },
-                    {
+                    }),
+                    "uptodate": kubecmd.NewNodeStatus(corev1.Node{
                         ObjectMeta: metav1.ObjectMeta{
                             Name: "uptodate",
                             Annotations: map[string]string{
@@ -159,25 +135,123 @@ func TestStatus(t *testing.T) {
                                 KubeletVersion: "v1.2.3",
                             },
                         },
-                    },
-                },
-            },
-            nodeVersion: updatev1alpha1.NodeVersion{
-                Spec: updatev1alpha1.NodeVersionSpec{
-                    ImageVersion:             "v1.1.0",
-                    ImageReference:           "v1.1.0",
-                    KubernetesClusterVersion: "v1.2.3",
-                },
-                Status: updatev1alpha1.NodeVersionStatus{
-                    Conditions: []metav1.Condition{
-                        {
-                            Message: "Some node versions are out of date",
-                        },
-                    },
-                },
-            },
+                    }),
+                },
+                version: mustParseNodeVersion(updatev1alpha1.NodeVersion{
+                    Spec: updatev1alpha1.NodeVersionSpec{
+                        ImageVersion:             "v1.1.0",
+                        ImageReference:           "v1.1.0",
+                        KubernetesClusterVersion: "v1.2.3",
+                    },
+                    Status: updatev1alpha1.NodeVersionStatus{
+                        Conditions: []metav1.Condition{
+                            {
+                                Message: "Some node versions are out of date",
+                            },
+                        },
+                    },
+                }),
+                attestation: &config.QEMUVTPM{
+                    Measurements: measurements.M{
+                        15: measurements.WithAllBytes(0, measurements.Enforce, measurements.PCRMeasurementLength),
+                    },
+                },
+            },
             attestVariant:  variant.QEMUVTPM{},
             expectedOutput: inProgressOutput,
         },
+        "error getting node status": {
+            kubeClient: stubKubeClient{
+                statusErr: assert.AnError,
+                version: mustParseNodeVersion(updatev1alpha1.NodeVersion{
+                    Spec: updatev1alpha1.NodeVersionSpec{
+                        ImageVersion:             "v1.1.0",
+                        ImageReference:           "v1.1.0",
+                        KubernetesClusterVersion: "v1.2.3",
+                    },
+                    Status: updatev1alpha1.NodeVersionStatus{
+                        Conditions: []metav1.Condition{
+                            {
+                                Message: "Node version of every node is up to date",
+                            },
+                        },
+                    },
+                }),
+                attestation: &config.QEMUVTPM{
+                    Measurements: measurements.M{
+                        15: measurements.WithAllBytes(0, measurements.Enforce, measurements.PCRMeasurementLength),
+                    },
+                },
+            },
+            attestVariant:  variant.QEMUVTPM{},
+            expectedOutput: successOutput,
+            wantErr:        true,
+        },
+        "error getting node version": {
+            kubeClient: stubKubeClient{
+                status: map[string]kubecmd.NodeStatus{
+                    "outdated": kubecmd.NewNodeStatus(corev1.Node{
+                        ObjectMeta: metav1.ObjectMeta{
+                            Name: "node1",
+                            Annotations: map[string]string{
+                                "constellation.edgeless.systems/node-image": "v1.1.0",
+                            },
+                        },
+                        Status: corev1.NodeStatus{
+                            NodeInfo: corev1.NodeSystemInfo{
+                                KubeletVersion: "v1.2.3",
+                            },
+                        },
+                    }),
+                },
+                versionErr: assert.AnError,
+                attestation: &config.QEMUVTPM{
+                    Measurements: measurements.M{
+                        15: measurements.WithAllBytes(0, measurements.Enforce, measurements.PCRMeasurementLength),
+                    },
+                },
+            },
+            attestVariant:  variant.QEMUVTPM{},
+            expectedOutput: successOutput,
+            wantErr:        true,
+        },
+        "error getting attestation config": {
+            kubeClient: stubKubeClient{
+                status: map[string]kubecmd.NodeStatus{
+                    "outdated": kubecmd.NewNodeStatus(corev1.Node{
+                        ObjectMeta: metav1.ObjectMeta{
+                            Name: "node1",
+                            Annotations: map[string]string{
+                                "constellation.edgeless.systems/node-image": "v1.1.0",
+                            },
+                        },
+                        Status: corev1.NodeStatus{
+                            NodeInfo: corev1.NodeSystemInfo{
+                                KubeletVersion: "v1.2.3",
+                            },
+                        },
+                    }),
+                },
+                version: mustParseNodeVersion(updatev1alpha1.NodeVersion{
+                    Spec: updatev1alpha1.NodeVersionSpec{
+                        ImageVersion:             "v1.1.0",
+                        ImageReference:           "v1.1.0",
+                        KubernetesClusterVersion: "v1.2.3",
+                    },
+                    Status: updatev1alpha1.NodeVersionStatus{
+                        Conditions: []metav1.Condition{
+                            {
+                                Message: "Node version of every node is up to date",
+                            },
+                        },
+                    },
+                }),
+                attestationErr: assert.AnError,
+            },
+            attestVariant:  variant.QEMUVTPM{},
+            expectedOutput: successOutput,
+            wantErr:        true,
+        },
     }
 
     for name, tc := range testCases {
@@ -185,16 +259,11 @@ func TestStatus(t *testing.T) {
             require := require.New(t)
             assert := assert.New(t)
 
-            raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&tc.nodeVersion)
-            require.NoError(err)
-            configMapper := stubConfigMapperAWSNitro{}
             variant := variant.AWSNitroTPM{}
             output, err := status(
                 context.Background(),
-                tc.kubeClient,
-                configMapper,
                 stubGetVersions(versionsOutput),
-                &stubDynamicInterface{data: unstructured.Unstructured{Object: raw}, err: tc.dynamicErr},
+                tc.kubeClient,
                 variant,
             )
             if tc.wantErr {
@@ -207,36 +276,25 @@ func TestStatus(t *testing.T) {
         })
     }
 }
 
-type stubConfigMapperAWSNitro struct{}
-
-func (s stubConfigMapperAWSNitro) GetConfigMap(_ context.Context, _ string) (*corev1.ConfigMap, error) {
-    return &corev1.ConfigMap{
-        Data: map[string]string{
-            "attestationConfig": `{"measurements":{"0":{"expected":"737f767a12f54e70eecbc8684011323ae2fe2dd9f90785577969d7a2013e8c12","warnOnly":true},"11":{"expected":"0000000000000000000000000000000000000000000000000000000000000000","warnOnly":false},"12":{"expected":"b8038d11eade4cfee5fd41da04bf64e58bab15c42bfe01801e4c0f61376ba010","warnOnly":false},"13":{"expected":"0000000000000000000000000000000000000000000000000000000000000000","warnOnly":false},"14":{"expected":"d7c4cc7ff7933022f013e03bdee875b91720b5b86cf1753cad830f95e791926f","warnOnly":true},"15":{"expected":"0000000000000000000000000000000000000000000000000000000000000000","warnOnly":false},"2":{"expected":"3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969","warnOnly":true},"3":{"expected":"3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969","warnOnly":true},"4":{"expected":"55f7616b2c51dd7603f491c1c266373fe5c1e25e06a851d2090960172b03b27f","warnOnly":false},"6":{"expected":"3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969","warnOnly":true},"7":{"expected":"fb71e5e55cefba9e2b396d17604de0fe6e1841a76758856a120833e3ad1c40a3","warnOnly":true},"8":{"expected":"0000000000000000000000000000000000000000000000000000000000000000","warnOnly":false},"9":{"expected":"f7480d37929bef4b61c32823cb7b3771aea19f7510db2e1478719a1d88f9775d","warnOnly":false}}}`,
-        },
-    }, nil
-}
-
 type stubKubeClient struct {
-    nodes []corev1.Node
-    err   error
+    status         map[string]kubecmd.NodeStatus
+    statusErr      error
+    version        kubecmd.NodeVersion
+    versionErr     error
+    attestation    config.AttestationCfg
+    attestationErr error
 }
 
-func (s stubKubeClient) GetNodes(_ context.Context) ([]corev1.Node, error) {
-    return s.nodes, s.err
+func (s stubKubeClient) ClusterStatus(_ context.Context) (map[string]kubecmd.NodeStatus, error) {
+    return s.status, s.statusErr
 }
 
-type stubDynamicInterface struct {
-    data unstructured.Unstructured
-    err  error
+func (s stubKubeClient) GetConstellationVersion(_ context.Context) (kubecmd.NodeVersion, error) {
+    return s.version, s.versionErr
 }
 
-func (s *stubDynamicInterface) GetCurrent(_ context.Context, _ string) (*unstructured.Unstructured, error) {
-    return &s.data, s.err
-}
-
-func (s *stubDynamicInterface) Update(_ context.Context, _ *unstructured.Unstructured) (*unstructured.Unstructured, error) {
-    return &s.data, s.err
+func (s stubKubeClient) GetClusterAttestationConfig(_ context.Context, _ variant.Variant) (config.AttestationCfg, error) {
    return s.attestation, s.attestationErr
 }
 
 func stubGetVersions(output string) func() (fmt.Stringer, error) {

@@ -17,7 +17,7 @@ import (
     "github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
     "github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
     "github.com/edgelesssys/constellation/v2/cli/internal/helm"
-    "github.com/edgelesssys/constellation/v2/cli/internal/kubernetes"
+    "github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
     "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/edgelesssys/constellation/v2/cli/internal/upgrade"
     "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
@@ -69,12 +69,12 @@ func runUpgradeApply(cmd *cobra.Command, _ []string) error {
     fileHandler := file.NewHandler(afero.NewOsFs())
     upgradeID := generateUpgradeID(upgradeCmdKindApply)
 
-    kubeUpgrader, err := kubernetes.NewUpgrader(cmd.OutOrStdout(), constants.AdminConfFilename, log)
+    kubeUpgrader, err := kubecmd.New(cmd.OutOrStdout(), constants.AdminConfFilename, log)
     if err != nil {
         return err
     }
 
-    helmUpgrader, err := helm.NewUpgradeClient(kubectl.New(), constants.UpgradeDir, constants.AdminConfFilename, constants.HelmNamespace, log)
+    helmUpgrader, err := helm.NewUpgradeClient(kubectl.NewUninitialized(), constants.UpgradeDir, constants.AdminConfFilename, constants.HelmNamespace, log)
     if err != nil {
         return fmt.Errorf("setting up helm client: %w", err)
     }
@@ -153,9 +153,10 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command) error {
     }
     conf.UpdateMAAURL(idFile.AttestationURL)
 
-    if err := u.confirmIfUpgradeAttestConfigHasDiff(cmd, conf.GetAttestationConfig(), flags); err != nil {
+    if err := u.confirmAttestationConfigUpgrade(cmd, conf.GetAttestationConfig(), flags); err != nil {
         return fmt.Errorf("upgrading measurements: %w", err)
     }
+
     // not moving existing Terraform migrator because of planned apply refactor
     tfOutput, err := u.migrateTerraform(cmd, conf, flags)
     if err != nil {
@@ -190,7 +191,7 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command) error {
 
     err = u.kubeUpgrader.UpgradeNodeVersion(cmd.Context(), conf, flags.force)
     switch {
-    case errors.Is(err, kubernetes.ErrInProgress):
+    case errors.Is(err, kubecmd.ErrInProgress):
         cmd.PrintErrln("Skipping image and Kubernetes upgrades. Another upgrade is in progress.")
     case errors.As(err, &upgradeErr):
         cmd.PrintErrln(err)
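kubecmd.ErrInProgress keeps working across the package rename because it is a sentinel error value: errors.Is unwraps whatever %w wrapping the upgrade path adds before comparing. A self-contained sketch of the pattern:

package main

import (
	"errors"
	"fmt"
)

var errInProgress = errors.New("upgrade in progress")

func upgradeNodeVersion() error {
	// the real call would talk to the cluster; here it always reports a
	// conflicting upgrade by wrapping the sentinel
	return fmt.Errorf("upgrading node version: %w", errInProgress)
}

func main() {
	err := upgradeNodeVersion()
	switch {
	case errors.Is(err, errInProgress):
		fmt.Println("Skipping image and Kubernetes upgrades. Another upgrade is in progress.")
	case err != nil:
		fmt.Println("error:", err)
	}
}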
@@ -337,9 +338,9 @@ func validK8sVersion(cmd *cobra.Command, version string, yes bool) (validVersion
     return validVersion, nil
 }
 
-// confirmIfUpgradeAttestConfigHasDiff checks if the locally configured measurements are different from the cluster's measurements.
-// If so the function will ask the user to confirm (if --yes is not set).
-func (u *upgradeApplyCmd) confirmIfUpgradeAttestConfigHasDiff(cmd *cobra.Command, newConfig config.AttestationCfg, flags upgradeApplyFlags) error {
+// confirmAttestationConfigUpgrade checks if the locally configured measurements are different from the cluster's measurements.
+// If so the function will ask the user to confirm (if --yes is not set) and upgrade the cluster's config.
+func (u *upgradeApplyCmd) confirmAttestationConfigUpgrade(cmd *cobra.Command, newConfig config.AttestationCfg, flags upgradeApplyFlags) error {
     clusterAttestationConfig, err := u.kubeUpgrader.GetClusterAttestationConfig(cmd.Context(), newConfig.GetVariant())
     if err != nil {
         return fmt.Errorf("getting cluster attestation config: %w", err)
@@ -369,10 +370,7 @@ func (u *upgradeApplyCmd) confirmIfUpgradeAttestConfigHasDiff(cmd *cobra.Command
             return errors.New("aborting upgrade since attestation config is different")
         }
     }
-    // TODO(elchead): move this outside this function to remove the side effect.
-    if err := u.kubeUpgrader.BackupConfigMap(cmd.Context(), constants.JoinConfigMap); err != nil {
-        return fmt.Errorf("backing up join-config: %w", err)
-    }
+
     if err := u.kubeUpgrader.UpdateAttestationConfig(cmd.Context(), newConfig); err != nil {
         return fmt.Errorf("updating attestation config: %w", err)
     }
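The explicit BackupConfigMap call (and the TODO about its side effect) disappears from the command layer here. Consistent with the commit message's "Refactor attestation config upgrade", the plausible reading is that the kubecmd client now snapshots the join-config as part of UpdateAttestationConfig itself. A toy sketch of backup-before-mutate under that assumption (names and storage are hypothetical):

package main

import (
	"context"
	"fmt"
)

type configMap map[string]string

// client stands in for the kubecmd client; store is its fake cluster state.
type client struct{ store map[string]configMap }

// updateJoinConfig snapshots the existing ConfigMap under a backup name
// before writing the new one, folding the old backup step into the update.
func (c *client) updateJoinConfig(_ context.Context, name string, newCM configMap) error {
	if old, ok := c.store[name]; ok {
		c.store[name+"-backup"] = old
	}
	c.store[name] = newCM
	return nil
}

func main() {
	c := &client{store: map[string]configMap{
		"join-config": {"attestationConfig": "old"},
	}}
	if err := c.updateJoinConfig(context.Background(), "join-config", configMap{"attestationConfig": "new"}); err != nil {
		panic(err)
	}
	fmt.Println(c.store["join-config"]["attestationConfig"], c.store["join-config-backup"]["attestationConfig"]) // new old
}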
@@ -496,8 +494,6 @@ type kubernetesUpgrader interface {
     ExtendClusterConfigCertSANs(ctx context.Context, alternativeNames []string) error
     GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error)
     UpdateAttestationConfig(ctx context.Context, newAttestConfig config.AttestationCfg) error
-    GetMeasurementSalt(ctx context.Context) ([]byte, error)
-    BackupConfigMap(ctx context.Context, name string) error
 }
 
 type helmUpgrader interface {

@@ -14,7 +14,7 @@ import (
 
     "github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
     "github.com/edgelesssys/constellation/v2/cli/internal/helm"
-    "github.com/edgelesssys/constellation/v2/cli/internal/kubernetes"
+    "github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
     "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
     "github.com/edgelesssys/constellation/v2/cli/internal/upgrade"
     "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
@@ -32,13 +32,12 @@ import (
 
 func TestUpgradeApply(t *testing.T) {
     testCases := map[string]struct {
-        helmUpgrader             *stubHelmUpgrader
-        kubeUpgrader             *stubKubernetesUpgrader
-        terraformUpgrader        *stubTerraformUpgrader
-        wantErr                  bool
-        yesFlag                  bool
-        dontWantJoinConfigBackup bool
-        stdin                    string
+        helmUpgrader      *stubHelmUpgrader
+        kubeUpgrader      *stubKubernetesUpgrader
+        terraformUpgrader *stubTerraformUpgrader
+        wantErr           bool
+        yesFlag           bool
+        stdin             string
     }{
         "success": {
             kubeUpgrader: &stubKubernetesUpgrader{currentConfig: config.DefaultForAzureSEVSNP()},
@@ -59,7 +58,7 @@ func TestUpgradeApply(t *testing.T) {
         "nodeVersion in progress error": {
             kubeUpgrader: &stubKubernetesUpgrader{
                 currentConfig:  config.DefaultForAzureSEVSNP(),
-                nodeVersionErr: kubernetes.ErrInProgress,
+                nodeVersionErr: kubecmd.ErrInProgress,
             },
             helmUpgrader:      &stubHelmUpgrader{},
             terraformUpgrader: &stubTerraformUpgrader{},
@@ -129,10 +128,9 @@ func TestUpgradeApply(t *testing.T) {
             kubeUpgrader: &stubKubernetesUpgrader{
                 currentConfig: fakeAzureAttestationConfigFromCluster(context.Background(), t, cloudprovider.Azure),
             },
-            helmUpgrader:             &stubHelmUpgrader{},
-            terraformUpgrader:        &stubTerraformUpgrader{},
-            yesFlag:                  true,
-            dontWantJoinConfigBackup: true,
+            helmUpgrader:      &stubHelmUpgrader{},
+            terraformUpgrader: &stubTerraformUpgrader{},
+            yesFlag:           true,
         },
     }
 
@@ -175,7 +173,6 @@ func TestUpgradeApply(t *testing.T) {
                 return
             }
             assert.NoError(err)
-            assert.Equal(!tc.dontWantJoinConfigBackup, tc.kubeUpgrader.backupWasCalled)
         })
     }
 }
@@ -192,18 +189,8 @@ func (u stubHelmUpgrader) Upgrade(
 }
 
 type stubKubernetesUpgrader struct {
-    backupWasCalled bool
-    nodeVersionErr  error
-    currentConfig   config.AttestationCfg
-}
-
-func (u stubKubernetesUpgrader) GetMeasurementSalt(_ context.Context) ([]byte, error) {
-    return []byte{}, nil
-}
-
-func (u *stubKubernetesUpgrader) BackupConfigMap(_ context.Context, _ string) error {
-    u.backupWasCalled = true
-    return nil
+    nodeVersionErr error
+    currentConfig  config.AttestationCfg
 }
 
 func (u stubKubernetesUpgrader) UpgradeNodeVersion(_ context.Context, _ *config.Config, _ bool) error {

@@ -19,7 +19,7 @@ import (
     "github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
     "github.com/edgelesssys/constellation/v2/cli/internal/featureset"
     "github.com/edgelesssys/constellation/v2/cli/internal/helm"
-    "github.com/edgelesssys/constellation/v2/cli/internal/kubernetes"
+    "github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
     "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
     "github.com/edgelesssys/constellation/v2/cli/internal/upgrade"
     "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
@@ -79,7 +79,7 @@ func runUpgradeCheck(cmd *cobra.Command, _ []string) error {
         return fmt.Errorf("setting up terraform client: %w", err)
     }
 
-    kubeChecker, err := kubernetes.NewUpgrader(cmd.OutOrStdout(), constants.AdminConfFilename, log)
+    kubeChecker, err := kubecmd.New(cmd.OutOrStdout(), constants.AdminConfFilename, log)
     if err != nil {
         return fmt.Errorf("setting up Kubernetes upgrader: %w", err)
     }
@@ -317,8 +317,8 @@ func filterK8sUpgrades(currentVersion string, newVersions []string) []string {
 
 type collector interface {
     currentVersions(ctx context.Context) (currentVersionInfo, error)
-    supportedVersions(ctx context.Context, version, currentK8sVersion string) (supportedVersionInfo, error)
-    newImages(ctx context.Context, version string) ([]versionsapi.Version, error)
+    supportedVersions(ctx context.Context, currentImageVersion, currentK8sVersion string) (supportedVersionInfo, error)
+    newImages(ctx context.Context, currentImageVersion string) ([]versionsapi.Version, error)
     newMeasurements(ctx context.Context, csp cloudprovider.Provider, attestationVariant variant.Variant, images []versionsapi.Version) (map[string]measurements.M, error)
     newerVersions(ctx context.Context, allowedVersions []string) ([]versionsapi.Version, error)
     newCLIVersions(ctx context.Context) ([]consemver.Semver, error)
@@ -376,7 +376,7 @@ type currentVersionInfo struct {
 }
 
 func (v *versionCollector) currentVersions(ctx context.Context) (currentVersionInfo, error) {
-    helmClient, err := helm.NewUpgradeClient(kubectl.New(), constants.UpgradeDir, constants.AdminConfFilename, constants.HelmNamespace, v.log)
+    helmClient, err := helm.NewUpgradeClient(kubectl.NewUninitialized(), constants.UpgradeDir, constants.AdminConfFilename, constants.HelmNamespace, v.log)
     if err != nil {
         return currentVersionInfo{}, fmt.Errorf("setting up helm client: %w", err)
     }
@@ -386,20 +386,21 @@ func (v *versionCollector) currentVersions(ctx context.Context) (currentVersionInfo, error) {
         return currentVersionInfo{}, fmt.Errorf("getting service versions: %w", err)
     }
 
-    imageVersion, err := getCurrentImageVersion(ctx, v.kubeChecker)
+    clusterVersions, err := v.kubeChecker.GetConstellationVersion(ctx)
     if err != nil {
-        return currentVersionInfo{}, fmt.Errorf("getting image version: %w", err)
+        return currentVersionInfo{}, fmt.Errorf("getting cluster versions: %w", err)
     }
-
-    k8sVersion, err := getCurrentKubernetesVersion(ctx, v.kubeChecker)
-    if err != nil {
-        return currentVersionInfo{}, fmt.Errorf("getting Kubernetes version: %w", err)
+    if !semver.IsValid(clusterVersions.ImageVersion()) {
+        return currentVersionInfo{}, fmt.Errorf("checking image for valid semantic version: %w", err)
+    }
+    if !semver.IsValid(clusterVersions.KubernetesVersion()) {
+        return currentVersionInfo{}, fmt.Errorf("checking Kubernetes for valid semantic version: %w", err)
     }
 
     return currentVersionInfo{
         service: serviceVersions.ConstellationServices(),
-        image:   imageVersion,
-        k8s:     k8sVersion,
+        image:   clusterVersions.ImageVersion(),
+        k8s:     clusterVersions.KubernetesVersion(),
         cli:     v.cliVersion,
     }, nil
 }
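Both IsValid checks above use golang.org/x/mod/semver, which treats only canonical strings with a leading "v" as valid; the standalone helpers that previously did this validation are deleted further down. For reference:

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	fmt.Println(semver.IsValid("v1.2.3"))        // true
	fmt.Println(semver.IsValid("1.2.3"))         // false: missing "v" prefix
	fmt.Println(semver.MajorMinor("v1.2.3-pre")) // "v1.2"
}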
@@ -415,10 +416,10 @@ type supportedVersionInfo struct {
 }
 
 // supportedVersions returns slices of supported versions.
-func (v *versionCollector) supportedVersions(ctx context.Context, version, currentK8sVersion string) (supportedVersionInfo, error) {
+func (v *versionCollector) supportedVersions(ctx context.Context, currentImageVersion, currentK8sVersion string) (supportedVersionInfo, error) {
     k8sVersions := versions.SupportedK8sVersions()
 
-    imageVersions, err := v.newImages(ctx, version)
+    imageVersions, err := v.newImages(ctx, currentImageVersion)
     if err != nil {
         return supportedVersionInfo{}, fmt.Errorf("loading image versions: %w", err)
     }
@@ -441,13 +442,13 @@ func (v *versionCollector) supportedVersions(ctx context.Context, version, currentK8sVersion string) (supportedVersionInfo, error) {
     }, nil
 }
 
-func (v *versionCollector) newImages(ctx context.Context, version string) ([]versionsapi.Version, error) {
+func (v *versionCollector) newImages(ctx context.Context, currentImageVersion string) ([]versionsapi.Version, error) {
     // find compatible images
     // image updates should always be possible for the current minor version of the cluster
     // (e.g. 0.1.0 -> 0.1.1, 0.1.2, 0.1.3, etc.)
     // additionally, we allow updates to the next minor version (e.g. 0.1.0 -> 0.2.0)
     // if the CLI minor version is newer than the cluster minor version
-    currentImageMinorVer := semver.MajorMinor(version)
+    currentImageMinorVer := semver.MajorMinor(currentImageVersion)
     currentCLIMinorVer := semver.MajorMinor(v.cliVersion.String())
     nextImageMinorVer, err := compatibility.NextMinorVersion(currentImageMinorVer)
     if err != nil {
@@ -590,35 +591,6 @@ func (v *versionUpgrade) writeConfig(conf *config.Config, fileHandler file.Handler
     return fileHandler.WriteYAML(configPath, conf, file.OptOverwrite)
 }
 
-// getCurrentImageVersion retrieves the semantic version of the image currently installed in the cluster.
-// If the cluster is not using a release image, an error is returned.
-func getCurrentImageVersion(ctx context.Context, checker kubernetesChecker) (string, error) {
-    imageVersion, err := checker.CurrentImage(ctx)
-    if err != nil {
-        return "", err
-    }
-
-    if !semver.IsValid(imageVersion) {
-        return "", fmt.Errorf("current image version is not a release image version: %q", imageVersion)
-    }
-
-    return imageVersion, nil
-}
-
-// getCurrentKubernetesVersion retrieves the semantic version of Kubernetes currently installed in the cluster.
-func getCurrentKubernetesVersion(ctx context.Context, checker kubernetesChecker) (string, error) {
-    k8sVersion, err := checker.CurrentKubernetesVersion(ctx)
-    if err != nil {
-        return "", err
-    }
-
-    if !semver.IsValid(k8sVersion) {
-        return "", fmt.Errorf("current kubernetes version is not a valid semver string: %q", k8sVersion)
-    }
-
-    return k8sVersion, nil
-}
-
 // getCompatibleImageMeasurements retrieves the expected measurements for each image.
 func getCompatibleImageMeasurements(ctx context.Context, writer io.Writer, client *http.Client, cosign sigstore.Verifier, rekor rekorVerifier,
     csp cloudprovider.Provider, attestationVariant variant.Variant, version versionsapi.Version, log debugLog,
@@ -711,7 +683,8 @@ func (v *versionCollector) newCLIVersions(ctx context.Context) ([]consemver.Semver, error) {
 }
 
 // filterCompatibleCLIVersions filters a list of CLI versions which are compatible with the current Kubernetes version.
-func (v *versionCollector) filterCompatibleCLIVersions(ctx context.Context, cliPatchVersions []consemver.Semver, currentK8sVersion string) ([]consemver.Semver, error) {
+func (v *versionCollector) filterCompatibleCLIVersions(ctx context.Context, cliPatchVersions []consemver.Semver, currentK8sVersion string,
+) ([]consemver.Semver, error) {
     // filter out invalid upgrades and versions which are not compatible with the current Kubernetes version
     var compatibleVersions []consemver.Semver
     for _, version := range cliPatchVersions {
@@ -750,8 +723,7 @@ type upgradeCheckFlags struct {
 }
 
 type kubernetesChecker interface {
-    CurrentImage(ctx context.Context) (string, error)
-    CurrentKubernetesVersion(ctx context.Context) (string, error)
+    GetConstellationVersion(ctx context.Context) (kubecmd.NodeVersion, error)
 }
 
 type terraformChecker interface {

@@ -28,7 +28,6 @@ import (
     "github.com/spf13/afero"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
-    "golang.org/x/mod/semver"
 )
 
 // TestBuildString checks that the resulting user output is as expected. Slow part is the Sscanf in parseCanonicalSemver().
@@ -108,46 +107,6 @@ func TestBuildString(t *testing.T) {
     }
 }
 
-func TestGetCurrentImageVersion(t *testing.T) {
-    testCases := map[string]struct {
-        stubKubernetesChecker stubKubernetesChecker
-        wantErr               bool
-    }{
-        "valid version": {
-            stubKubernetesChecker: stubKubernetesChecker{
-                image: "v1.0.0",
-            },
-        },
-        "invalid version": {
-            stubKubernetesChecker: stubKubernetesChecker{
-                image: "invalid",
-            },
-            wantErr: true,
-        },
-        "GetCurrentImage error": {
-            stubKubernetesChecker: stubKubernetesChecker{
-                err: errors.New("error"),
-            },
-            wantErr: true,
-        },
-    }
-
-    for name, tc := range testCases {
-        t.Run(name, func(t *testing.T) {
-            assert := assert.New(t)
-
-            version, err := getCurrentImageVersion(context.Background(), tc.stubKubernetesChecker)
-            if tc.wantErr {
-                assert.Error(err)
-                return
-            }
-
-            assert.NoError(err)
-            assert.True(semver.IsValid(version))
-        })
-    }
-}
-
 func TestGetCompatibleImageMeasurements(t *testing.T) {
     assert := assert.New(t)
     require := require.New(t)
@@ -317,20 +276,6 @@ func (s *stubVersionCollector) filterCompatibleCLIVersions(_ context.Context, _ []consemver.Semver, _ string) ([]consemver.Semver, error) {
     return s.newCompatibleCLIVersionsList, nil
 }
 
-type stubKubernetesChecker struct {
-    image      string
-    k8sVersion string
-    err        error
-}
-
-func (s stubKubernetesChecker) CurrentImage(context.Context) (string, error) {
-    return s.image, s.err
-}
-
-func (s stubKubernetesChecker) CurrentKubernetesVersion(context.Context) (string, error) {
-    return s.k8sVersion, s.err
-}
-
 type stubTerraformChecker struct {
     tfDiff bool
     err    error