Mirror of https://github.com/edgelesssys/constellation.git, synced 2025-01-11 07:29:29 -05:00

cli: refactor kubernetes package (#2232)

* Clean up CLI kubernetes package
* Rename CLI kubernetes pkg to kubecmd
* Unify kubernetes clients
* Refactor attestation config upgrade
* Update CODEOWNERS file
* Remove outdated GetMeasurementSalt

Signed-off-by: Daniel Weiße <dw@edgeless.systems>

Parent: 3bf316e28f
Commit: afa7fd0edb
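The refactor consolidates the CLI's direct Kubernetes access into the new kubecmd package. As orientation before the hunks below, this is the call pattern the commit converges on — a minimal sketch assembled from signatures that appear in this diff (error handling shortened, command plumbing omitted; not a verbatim excerpt):

    // One kubecmd client, built from the admin kubeconfig, replaces the previous
    // mix of stable client, dynamic (unstructured) client, and helper functions.
    kubeClient, err := kubecmd.New(cmd.OutOrStdout(), constants.AdminConfFilename, log)
    if err != nil {
        return fmt.Errorf("setting up kubernetes client: %w", err)
    }

    // NodeVersion CRD, previously fetched via kubernetes.GetConstellationVersion.
    nodeVersion, err := kubeClient.GetConstellationVersion(ctx)

    // Per-node status map, previously kubernetes.ClusterStatus plus a separate kubectl client.
    status, err := kubeClient.ClusterStatus(ctx)

    // Cluster attestation config, previously read by hand from the join-config ConfigMap.
    attestationConfig, err := kubeClient.GetClusterAttestationConfig(ctx, attestVariant)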
@@ -10,7 +10,7 @@
 /cli/internal/cmd/upgrade* @derpsteb
 /cli/internal/featureset @malt3
 /cli/internal/helm @derpsteb
-/cli/internal/kubernetes @daniel-weisse
+/cli/internal/kubecmd @daniel-weisse
 /cli/internal/libvirt @daniel-weisse
 /cli/internal/terraform @elchead
 /cli/internal/upgrade @elchead
@@ -89,7 +89,7 @@ func main() {
         }

         clusterInitJoiner = kubernetes.New(
-            "aws", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.New(),
+            "aws", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(),
             metadata, &kubewaiter.CloudKubeAPIWaiter{},
         )
         openDevice = vtpm.OpenVTPM
@@ -109,7 +109,7 @@ func main() {

         metadataAPI = metadata
         clusterInitJoiner = kubernetes.New(
-            "gcp", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.New(),
+            "gcp", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(),
             metadata, &kubewaiter.CloudKubeAPIWaiter{},
         )
         openDevice = vtpm.OpenVTPM
@@ -127,7 +127,7 @@ func main() {
         }
         metadataAPI = metadata
         clusterInitJoiner = kubernetes.New(
-            "azure", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.New(),
+            "azure", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(),
             metadata, &kubewaiter.CloudKubeAPIWaiter{},
         )

@@ -138,7 +138,7 @@ func main() {
         cloudLogger = qemucloud.NewLogger()
         metadata := qemucloud.New()
         clusterInitJoiner = kubernetes.New(
-            "qemu", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.New(),
+            "qemu", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(),
             metadata, &kubewaiter.CloudKubeAPIWaiter{},
         )
         metadataAPI = metadata
@@ -161,7 +161,7 @@ func main() {
             log.With(zap.Error(err)).Fatalf("Failed to create OpenStack metadata client")
         }
         clusterInitJoiner = kubernetes.New(
-            "openstack", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.New(),
+            "openstack", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(),
             metadata, &kubewaiter.CloudKubeAPIWaiter{},
         )
         metadataAPI = metadata
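Each call site above swaps kubectl.New() for kubectl.NewUninitialized(): the client is constructed before the cluster exists and is only connected once a kubeconfig is available. A minimal sketch of that deferred-initialization pattern, based on the Initialize(kubeconfig []byte) error method of the Client interface shown in the next hunk (the error wrapping here is illustrative):

    // No connection is made at construction time.
    client := kubectl.NewUninitialized()

    // ... later, once the admin kubeconfig has been written ...
    kubeConfig, err := fileHandler.Read(constants.AdminConfFilename)
    if err != nil {
        return fmt.Errorf("reading admin.conf: %w", err)
    }
    // Initialize connects the already-constructed client to the cluster.
    if err := client.Initialize(kubeConfig); err != nil {
        return err
    }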
@@ -49,7 +49,7 @@ const (
 // Client provides the functions to talk to the k8s API.
 type Client interface {
     Initialize(kubeconfig []byte) error
-    CreateConfigMap(ctx context.Context, configMap corev1.ConfigMap) error
+    CreateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) error
     AddNodeSelectorsToDeployment(ctx context.Context, selectors map[string]string, name string, namespace string) error
     ListAllNamespaces(ctx context.Context) (*corev1.NamespaceList, error)
     AnnotateNode(ctx context.Context, nodeName, annotationKey, annotationValue string) error
@@ -261,7 +261,7 @@ func (k *KubeWrapper) setupK8sComponentsConfigMap(ctx context.Context, component
         return "", fmt.Errorf("constructing k8s-components ConfigMap: %w", err)
     }

-    if err := k.client.CreateConfigMap(ctx, componentsConfig); err != nil {
+    if err := k.client.CreateConfigMap(ctx, &componentsConfig); err != nil {
         return "", fmt.Errorf("apply in KubeWrapper.setupK8sVersionConfigMap(..) for components config map failed with: %w", err)
     }

@@ -284,7 +284,7 @@ func (k *KubeWrapper) setupInternalConfigMap(ctx context.Context) error {

     // We do not use the client's Apply method here since we are handling a kubernetes-native type.
     // These types don't implement our custom Marshaler interface.
-    if err := k.client.CreateConfigMap(ctx, config); err != nil {
+    if err := k.client.CreateConfigMap(ctx, &config); err != nil {
         return fmt.Errorf("apply in KubeWrapper.setupInternalConfigMap failed with: %w", err)
     }

@@ -522,7 +522,7 @@ func (s *stubKubectl) Initialize(_ []byte) error {
     return nil
 }

-func (s *stubKubectl) CreateConfigMap(_ context.Context, _ corev1.ConfigMap) error {
+func (s *stubKubectl) CreateConfigMap(_ context.Context, _ *corev1.ConfigMap) error {
     return s.createConfigMapErr
 }

@@ -45,7 +45,7 @@ go_library(
         "//cli/internal/cmd/pathprefix",
         "//cli/internal/featureset",
         "//cli/internal/helm",
-        "//cli/internal/kubernetes",
+        "//cli/internal/kubecmd",
         "//cli/internal/libvirt",
         "//cli/internal/terraform",
         "//cli/internal/upgrade",
@@ -78,7 +78,6 @@ go_library(
         "//internal/sigstore",
         "//internal/sigstore/keyselect",
         "//internal/versions",
-        "//operators/constellation-node-operator/api/v1alpha1",
         "//verify/verifyproto",
         "@com_github_golang_jwt_jwt_v5//:jwt",
         "@com_github_google_go_sev_guest//abi",
@@ -91,9 +90,7 @@ go_library(
         "@com_github_spf13_afero//:afero",
         "@com_github_spf13_cobra//:cobra",
         "@in_gopkg_yaml_v3//:yaml_v3",
-        "@io_k8s_api//core/v1:core",
         "@io_k8s_apimachinery//pkg/runtime",
-        "@io_k8s_client_go//dynamic",
         "@io_k8s_client_go//tools/clientcmd",
         "@io_k8s_client_go//tools/clientcmd/api/latest",
         "@io_k8s_sigs_yaml//:yaml",
@@ -140,7 +137,7 @@ go_test(
         "//cli/internal/clusterid",
         "//cli/internal/cmd/pathprefix",
         "//cli/internal/helm",
-        "//cli/internal/kubernetes",
+        "//cli/internal/kubecmd",
         "//cli/internal/terraform",
         "//cli/internal/upgrade",
         "//disk-mapper/recoverproto",
@@ -172,8 +169,6 @@ go_test(
         "@com_github_stretchr_testify//require",
         "@io_k8s_api//core/v1:core",
         "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
-        "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured",
-        "@io_k8s_apimachinery//pkg/runtime",
         "@org_golang_google_grpc//:go_default_library",
         "@org_golang_google_grpc//codes",
         "@org_golang_google_grpc//status",
@@ -13,20 +13,16 @@ import (
     "strings"

     "github.com/edgelesssys/constellation/v2/cli/internal/helm"
-    "github.com/edgelesssys/constellation/v2/cli/internal/kubernetes"
+    "github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
     "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
     "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
     "github.com/edgelesssys/constellation/v2/internal/config"
     "github.com/edgelesssys/constellation/v2/internal/constants"
     "github.com/edgelesssys/constellation/v2/internal/file"
     "github.com/edgelesssys/constellation/v2/internal/kubernetes/kubectl"
-    "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
     "github.com/spf13/afero"
     "github.com/spf13/cobra"
     "gopkg.in/yaml.v3"
-    corev1 "k8s.io/api/core/v1"
-    "k8s.io/client-go/dynamic"
-    "k8s.io/client-go/tools/clientcmd"
 )

 // NewStatusCmd returns a new cobra.Command for the statuus command.
@@ -50,38 +46,17 @@ func runStatus(cmd *cobra.Command, _ []string) error {
     }
     defer log.Sync()

-    kubeClient := kubectl.New()
-
     flags, err := parseStatusFlags(cmd)
     if err != nil {
         return fmt.Errorf("parsing flags: %w", err)
     }

     fileHandler := file.NewHandler(afero.NewOsFs())
-    kubeConfig, err := fileHandler.Read(constants.AdminConfFilename)
-    if err != nil {
-        return fmt.Errorf("reading admin.conf: %w", err)
-    }
-
-    // need kubectl client to fetch nodes.
-    if err := kubeClient.Initialize(kubeConfig); err != nil {
-        return err
-    }
-
-    restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeConfig)
-    if err != nil {
-        return fmt.Errorf("creating k8s client config from kubeconfig: %w", err)
-    }
-    // need unstructed client to fetch NodeVersion CRD.
-    unstructuredClient, err := dynamic.NewForConfig(restConfig)
-    if err != nil {
-        return fmt.Errorf("setting up custom resource client: %w", err)
-    }

     // need helm client to fetch service versions.
     // The client used here, doesn't need to know the current workspace.
     // It may be refactored in the future for easier usage.
-    helmClient, err := helm.NewUpgradeClient(kubectl.New(), constants.UpgradeDir, constants.AdminConfFilename, constants.HelmNamespace, log)
+    helmClient, err := helm.NewUpgradeClient(kubectl.NewUninitialized(), constants.UpgradeDir, constants.AdminConfFilename, constants.HelmNamespace, log)
     if err != nil {
         return fmt.Errorf("setting up helm client: %w", err)
     }
@@ -97,11 +72,12 @@ func runStatus(cmd *cobra.Command, _ []string) error {
     }
     variant := conf.GetAttestationConfig().GetVariant()

-    stableClient, err := kubernetes.NewStableClient(constants.AdminConfFilename)
+    kubeClient, err := kubecmd.New(cmd.OutOrStdout(), constants.AdminConfFilename, log)
     if err != nil {
-        return fmt.Errorf("setting up stable client: %w", err)
+        return fmt.Errorf("setting up kubernetes client: %w", err)
     }
-    output, err := status(cmd.Context(), kubeClient, stableClient, helmVersionGetter, kubernetes.NewNodeVersionClient(unstructuredClient), variant)
+
+    output, err := status(cmd.Context(), helmVersionGetter, kubeClient, variant)
     if err != nil {
         return fmt.Errorf("getting status: %w", err)
     }
@@ -111,19 +87,14 @@ func runStatus(cmd *cobra.Command, _ []string) error {
 }

 // status queries the cluster for the relevant status information and returns the output string.
-func status(
-    ctx context.Context, kubeClient kubeClient, cmClient configMapClient, getHelmVersions func() (fmt.Stringer, error),
-    dynamicInterface kubernetes.DynamicInterface, attestVariant variant.Variant,
+func status(ctx context.Context, getHelmVersions func() (fmt.Stringer, error), kubeClient kubeCmd, attestVariant variant.Variant,
 ) (string, error) {
-    nodeVersion, err := kubernetes.GetConstellationVersion(ctx, dynamicInterface)
+    nodeVersion, err := kubeClient.GetConstellationVersion(ctx)
     if err != nil {
         return "", fmt.Errorf("getting constellation version: %w", err)
     }
-    if len(nodeVersion.Status.Conditions) != 1 {
-        return "", fmt.Errorf("expected exactly one condition, got %d", len(nodeVersion.Status.Conditions))
-    }

-    attestationConfig, err := getAttestationConfig(ctx, cmClient, attestVariant)
+    attestationConfig, err := kubeClient.GetClusterAttestationConfig(ctx, attestVariant)
     if err != nil {
         return "", fmt.Errorf("getting attestation config: %w", err)
     }
@@ -132,51 +103,30 @@ func status(
         return "", fmt.Errorf("marshalling attestation config: %w", err)
     }

-    targetVersions, err := kubernetes.NewTargetVersions(nodeVersion)
-    if err != nil {
-        return "", fmt.Errorf("getting configured versions: %w", err)
-    }
-
     serviceVersions, err := getHelmVersions()
     if err != nil {
         return "", fmt.Errorf("getting service versions: %w", err)
     }

-    status, err := kubernetes.ClusterStatus(ctx, kubeClient)
+    status, err := kubeClient.ClusterStatus(ctx)
     if err != nil {
         return "", fmt.Errorf("getting cluster status: %w", err)
     }

-    return statusOutput(targetVersions, serviceVersions, status, nodeVersion, string(prettyYAML)), nil
-}
-
-func getAttestationConfig(ctx context.Context, cmClient configMapClient, attestVariant variant.Variant) (config.AttestationCfg, error) {
-    joinConfig, err := cmClient.GetConfigMap(ctx, constants.JoinConfigMap)
-    if err != nil {
-        return nil, fmt.Errorf("getting current config map: %w", err)
-    }
-    rawAttestationConfig, ok := joinConfig.Data[constants.AttestationConfigFilename]
-    if !ok {
-        return nil, fmt.Errorf("attestationConfig not found in %s", constants.JoinConfigMap)
-    }
-    attestationConfig, err := config.UnmarshalAttestationConfig([]byte(rawAttestationConfig), attestVariant)
-    if err != nil {
-        return nil, fmt.Errorf("unmarshalling attestation config: %w", err)
-    }
-    return attestationConfig, nil
+    return statusOutput(nodeVersion, serviceVersions, status, string(prettyYAML)), nil
 }

 // statusOutput creates the status cmd output string by formatting the received information.
 func statusOutput(
-    targetVersions kubernetes.TargetVersions, serviceVersions fmt.Stringer,
-    status map[string]kubernetes.NodeStatus, nodeVersion v1alpha1.NodeVersion, rawAttestationConfig string,
+    nodeVersion kubecmd.NodeVersion, serviceVersions fmt.Stringer,
+    status map[string]kubecmd.NodeStatus, rawAttestationConfig string,
 ) string {
     builder := strings.Builder{}

-    builder.WriteString(targetVersionsString(targetVersions))
+    builder.WriteString(targetVersionsString(nodeVersion))
     builder.WriteString(serviceVersions.String())
-    builder.WriteString(fmt.Sprintf("Cluster status: %s\n", nodeVersion.Status.Conditions[0].Message))
-    builder.WriteString(nodeStatusString(status, targetVersions))
+    builder.WriteString(fmt.Sprintf("Cluster status: %s\n", nodeVersion.ClusterStatus()))
+    builder.WriteString(nodeStatusString(status, nodeVersion))
     builder.WriteString(fmt.Sprintf("Attestation config:\n%s", indentEntireStringWithTab(rawAttestationConfig)))
     return builder.String()
 }
@@ -190,14 +140,14 @@ func indentEntireStringWithTab(input string) string {
 }

 // nodeStatusString creates the node status part of the output string.
-func nodeStatusString(status map[string]kubernetes.NodeStatus, targetVersions kubernetes.TargetVersions) string {
+func nodeStatusString(status map[string]kubecmd.NodeStatus, targetVersions kubecmd.NodeVersion) string {
     var upToDateImages int
     var upToDateK8s int
     for _, node := range status {
-        if node.KubeletVersion() == targetVersions.Kubernetes() {
+        if node.KubeletVersion() == targetVersions.KubernetesVersion() {
             upToDateK8s++
         }
-        if node.ImageVersion() == targetVersions.ImagePath() {
+        if node.ImageVersion() == targetVersions.ImageReference() {
             upToDateImages++
         }
     }
@@ -212,11 +162,11 @@ func nodeStatusString(status map[string]kubernetes.NodeStatus, targetVersions ku
 }

 // targetVersionsString creates the target versions part of the output string.
-func targetVersionsString(target kubernetes.TargetVersions) string {
+func targetVersionsString(target kubecmd.NodeVersion) string {
     builder := strings.Builder{}
     builder.WriteString("Target versions:\n")
-    builder.WriteString(fmt.Sprintf("\tImage: %s\n", target.Image()))
-    builder.WriteString(fmt.Sprintf("\tKubernetes: %s\n", target.Kubernetes()))
+    builder.WriteString(fmt.Sprintf("\tImage: %s\n", target.ImageVersion()))
+    builder.WriteString(fmt.Sprintf("\tKubernetes: %s\n", target.KubernetesVersion()))

     return builder.String()
 }
@@ -241,10 +191,8 @@ func parseStatusFlags(cmd *cobra.Command) (statusFlags, error) {
     }, nil
 }

-type kubeClient interface {
-    GetNodes(ctx context.Context) ([]corev1.Node, error)
-}
-
-type configMapClient interface {
-    GetConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error)
+type kubeCmd interface {
+    ClusterStatus(ctx context.Context) (map[string]kubecmd.NodeStatus, error)
+    GetConstellationVersion(ctx context.Context) (kubecmd.NodeVersion, error)
+    GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error)
 }
@@ -11,14 +11,15 @@ import (
     "fmt"
     "testing"

+    "github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
     "github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
     "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
     "github.com/edgelesssys/constellation/v2/internal/config"
     updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/runtime"
 )

 const successOutput = targetVersions + versionsOutput + nodesUpToDateOutput + attestationConfigOutput
@@ -47,42 +48,6 @@ const versionsOutput = `Service versions:

 const attestationConfigOutput = `Attestation config:
     measurements:
-        0:
-            expected: 737f767a12f54e70eecbc8684011323ae2fe2dd9f90785577969d7a2013e8c12
-            warnOnly: true
-        2:
-            expected: 3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969
-            warnOnly: true
-        3:
-            expected: 3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969
-            warnOnly: true
-        4:
-            expected: 55f7616b2c51dd7603f491c1c266373fe5c1e25e06a851d2090960172b03b27f
-            warnOnly: false
-        6:
-            expected: 3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969
-            warnOnly: true
-        7:
-            expected: fb71e5e55cefba9e2b396d17604de0fe6e1841a76758856a120833e3ad1c40a3
-            warnOnly: true
-        8:
-            expected: "0000000000000000000000000000000000000000000000000000000000000000"
-            warnOnly: false
-        9:
-            expected: f7480d37929bef4b61c32823cb7b3771aea19f7510db2e1478719a1d88f9775d
-            warnOnly: false
-        11:
-            expected: "0000000000000000000000000000000000000000000000000000000000000000"
-            warnOnly: false
-        12:
-            expected: b8038d11eade4cfee5fd41da04bf64e58bab15c42bfe01801e4c0f61376ba010
-            warnOnly: false
-        13:
-            expected: "0000000000000000000000000000000000000000000000000000000000000000"
-            warnOnly: false
-        14:
-            expected: d7c4cc7ff7933022f013e03bdee875b91720b5b86cf1753cad830f95e791926f
-            warnOnly: true
         15:
             expected: "0000000000000000000000000000000000000000000000000000000000000000"
             warnOnly: false
@@ -90,17 +55,22 @@ const attestationConfigOutput = `Attestation config:

 // TestStatus checks that the status function produces the correct strings.
 func TestStatus(t *testing.T) {
+    mustParseNodeVersion := func(nV updatev1alpha1.NodeVersion) kubecmd.NodeVersion {
+        nodeVersion, err := kubecmd.NewNodeVersion(nV)
+        require.NoError(t, err)
+        return nodeVersion
+    }
+
     testCases := map[string]struct {
         kubeClient     stubKubeClient
-        nodeVersion    updatev1alpha1.NodeVersion
-        dynamicErr     error
         attestVariant  variant.Variant
         expectedOutput string
         wantErr        bool
     }{
         "success": {
             kubeClient: stubKubeClient{
-                nodes: []corev1.Node{
-                    {
+                status: map[string]kubecmd.NodeStatus{
+                    "outdated": kubecmd.NewNodeStatus(corev1.Node{
                         ObjectMeta: metav1.ObjectMeta{
                             Name: "node1",
                             Annotations: map[string]string{
@@ -112,29 +82,35 @@ func TestStatus(t *testing.T) {
                             KubeletVersion: "v1.2.3",
                         },
                     },
-                    },
-                },
-            },
-            nodeVersion: updatev1alpha1.NodeVersion{
-                Spec: updatev1alpha1.NodeVersionSpec{
-                    ImageVersion:             "v1.1.0",
-                    ImageReference:           "v1.1.0",
-                    KubernetesClusterVersion: "v1.2.3",
-                },
-                Status: updatev1alpha1.NodeVersionStatus{
-                    Conditions: []metav1.Condition{
-                        {
-                            Message: "Node version of every node is up to date",
-                        },
-                    },
-                },
-            },
+                    }),
+                },
+                version: mustParseNodeVersion(updatev1alpha1.NodeVersion{
+                    Spec: updatev1alpha1.NodeVersionSpec{
+                        ImageVersion:             "v1.1.0",
+                        ImageReference:           "v1.1.0",
+                        KubernetesClusterVersion: "v1.2.3",
+                    },
+                    Status: updatev1alpha1.NodeVersionStatus{
+                        Conditions: []metav1.Condition{
+                            {
+                                Message: "Node version of every node is up to date",
+                            },
+                        },
+                    },
+                }),
+                attestation: &config.QEMUVTPM{
+                    Measurements: measurements.M{
+                        15: measurements.WithAllBytes(0, measurements.Enforce, measurements.PCRMeasurementLength),
+                    },
+                },
+            },
             attestVariant:  variant.QEMUVTPM{},
             expectedOutput: successOutput,
         },
         "one of two nodes not upgraded": {
             kubeClient: stubKubeClient{
-                nodes: []corev1.Node{
-                    {
+                status: map[string]kubecmd.NodeStatus{
+                    "outdated": kubecmd.NewNodeStatus(corev1.Node{
                         ObjectMeta: metav1.ObjectMeta{
                             Name: "outdated",
                             Annotations: map[string]string{
@@ -146,8 +122,8 @@ func TestStatus(t *testing.T) {
                             KubeletVersion: "v1.2.2",
                         },
                     },
-                    },
-                    {
+                    }),
+                    "uptodate": kubecmd.NewNodeStatus(corev1.Node{
                         ObjectMeta: metav1.ObjectMeta{
                             Name: "uptodate",
                             Annotations: map[string]string{
@@ -159,25 +135,123 @@ func TestStatus(t *testing.T) {
                             KubeletVersion: "v1.2.3",
                         },
                     },
-                    },
-                },
-            },
-            nodeVersion: updatev1alpha1.NodeVersion{
-                Spec: updatev1alpha1.NodeVersionSpec{
-                    ImageVersion:             "v1.1.0",
-                    ImageReference:           "v1.1.0",
-                    KubernetesClusterVersion: "v1.2.3",
-                },
-                Status: updatev1alpha1.NodeVersionStatus{
-                    Conditions: []metav1.Condition{
-                        {
-                            Message: "Some node versions are out of date",
-                        },
-                    },
-                },
-            },
+                    }),
+                },
+                version: mustParseNodeVersion(updatev1alpha1.NodeVersion{
+                    Spec: updatev1alpha1.NodeVersionSpec{
+                        ImageVersion:             "v1.1.0",
+                        ImageReference:           "v1.1.0",
+                        KubernetesClusterVersion: "v1.2.3",
+                    },
+                    Status: updatev1alpha1.NodeVersionStatus{
+                        Conditions: []metav1.Condition{
+                            {
+                                Message: "Some node versions are out of date",
+                            },
+                        },
+                    },
+                }),
+                attestation: &config.QEMUVTPM{
+                    Measurements: measurements.M{
+                        15: measurements.WithAllBytes(0, measurements.Enforce, measurements.PCRMeasurementLength),
+                    },
+                },
+            },
             attestVariant:  variant.QEMUVTPM{},
             expectedOutput: inProgressOutput,
         },
+        "error getting node status": {
+            kubeClient: stubKubeClient{
+                statusErr: assert.AnError,
+                version: mustParseNodeVersion(updatev1alpha1.NodeVersion{
+                    Spec: updatev1alpha1.NodeVersionSpec{
+                        ImageVersion:             "v1.1.0",
+                        ImageReference:           "v1.1.0",
+                        KubernetesClusterVersion: "v1.2.3",
+                    },
+                    Status: updatev1alpha1.NodeVersionStatus{
+                        Conditions: []metav1.Condition{
+                            {
+                                Message: "Node version of every node is up to date",
+                            },
+                        },
+                    },
+                }),
+                attestation: &config.QEMUVTPM{
+                    Measurements: measurements.M{
+                        15: measurements.WithAllBytes(0, measurements.Enforce, measurements.PCRMeasurementLength),
+                    },
+                },
+            },
+            attestVariant:  variant.QEMUVTPM{},
+            expectedOutput: successOutput,
+            wantErr:        true,
+        },
+        "error getting node version": {
+            kubeClient: stubKubeClient{
+                status: map[string]kubecmd.NodeStatus{
+                    "outdated": kubecmd.NewNodeStatus(corev1.Node{
+                        ObjectMeta: metav1.ObjectMeta{
+                            Name: "node1",
+                            Annotations: map[string]string{
+                                "constellation.edgeless.systems/node-image": "v1.1.0",
+                            },
+                        },
+                        Status: corev1.NodeStatus{
+                            NodeInfo: corev1.NodeSystemInfo{
+                                KubeletVersion: "v1.2.3",
+                            },
+                        },
+                    }),
+                },
+                versionErr: assert.AnError,
+                attestation: &config.QEMUVTPM{
+                    Measurements: measurements.M{
+                        15: measurements.WithAllBytes(0, measurements.Enforce, measurements.PCRMeasurementLength),
+                    },
+                },
+            },
+            attestVariant:  variant.QEMUVTPM{},
+            expectedOutput: successOutput,
+            wantErr:        true,
+        },
+        "error getting attestation config": {
+            kubeClient: stubKubeClient{
+                status: map[string]kubecmd.NodeStatus{
+                    "outdated": kubecmd.NewNodeStatus(corev1.Node{
+                        ObjectMeta: metav1.ObjectMeta{
+                            Name: "node1",
+                            Annotations: map[string]string{
+                                "constellation.edgeless.systems/node-image": "v1.1.0",
+                            },
+                        },
+                        Status: corev1.NodeStatus{
+                            NodeInfo: corev1.NodeSystemInfo{
+                                KubeletVersion: "v1.2.3",
+                            },
+                        },
+                    }),
+                },
+                version: mustParseNodeVersion(updatev1alpha1.NodeVersion{
+                    Spec: updatev1alpha1.NodeVersionSpec{
+                        ImageVersion:             "v1.1.0",
+                        ImageReference:           "v1.1.0",
+                        KubernetesClusterVersion: "v1.2.3",
+                    },
+                    Status: updatev1alpha1.NodeVersionStatus{
+                        Conditions: []metav1.Condition{
+                            {
+                                Message: "Node version of every node is up to date",
+                            },
+                        },
+                    },
+                }),
+                attestationErr: assert.AnError,
+            },
+            attestVariant:  variant.QEMUVTPM{},
+            expectedOutput: successOutput,
+            wantErr:        true,
+        },
     }

     for name, tc := range testCases {
@@ -185,16 +259,11 @@ func TestStatus(t *testing.T) {
             require := require.New(t)
             assert := assert.New(t)

-            raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&tc.nodeVersion)
-            require.NoError(err)
-            configMapper := stubConfigMapperAWSNitro{}
             variant := variant.AWSNitroTPM{}
             output, err := status(
                 context.Background(),
-                tc.kubeClient,
-                configMapper,
                 stubGetVersions(versionsOutput),
-                &stubDynamicInterface{data: unstructured.Unstructured{Object: raw}, err: tc.dynamicErr},
+                tc.kubeClient,
                 variant,
             )
             if tc.wantErr {
@@ -207,36 +276,25 @@ func TestStatus(t *testing.T) {
         })
     }
 }

-type stubConfigMapperAWSNitro struct{}
-
-func (s stubConfigMapperAWSNitro) GetConfigMap(_ context.Context, _ string) (*corev1.ConfigMap, error) {
-    return &corev1.ConfigMap{
-        Data: map[string]string{
-            "attestationConfig": `{"measurements":{"0":{"expected":"737f767a12f54e70eecbc8684011323ae2fe2dd9f90785577969d7a2013e8c12","warnOnly":true},"11":{"expected":"0000000000000000000000000000000000000000000000000000000000000000","warnOnly":false},"12":{"expected":"b8038d11eade4cfee5fd41da04bf64e58bab15c42bfe01801e4c0f61376ba010","warnOnly":false},"13":{"expected":"0000000000000000000000000000000000000000000000000000000000000000","warnOnly":false},"14":{"expected":"d7c4cc7ff7933022f013e03bdee875b91720b5b86cf1753cad830f95e791926f","warnOnly":true},"15":{"expected":"0000000000000000000000000000000000000000000000000000000000000000","warnOnly":false},"2":{"expected":"3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969","warnOnly":true},"3":{"expected":"3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969","warnOnly":true},"4":{"expected":"55f7616b2c51dd7603f491c1c266373fe5c1e25e06a851d2090960172b03b27f","warnOnly":false},"6":{"expected":"3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969","warnOnly":true},"7":{"expected":"fb71e5e55cefba9e2b396d17604de0fe6e1841a76758856a120833e3ad1c40a3","warnOnly":true},"8":{"expected":"0000000000000000000000000000000000000000000000000000000000000000","warnOnly":false},"9":{"expected":"f7480d37929bef4b61c32823cb7b3771aea19f7510db2e1478719a1d88f9775d","warnOnly":false}}}`,
-        },
-    }, nil
-}
-
 type stubKubeClient struct {
-    nodes []corev1.Node
-    err   error
+    status         map[string]kubecmd.NodeStatus
+    statusErr      error
+    version        kubecmd.NodeVersion
+    versionErr     error
+    attestation    config.AttestationCfg
+    attestationErr error
 }

-func (s stubKubeClient) GetNodes(_ context.Context) ([]corev1.Node, error) {
-    return s.nodes, s.err
+func (s stubKubeClient) ClusterStatus(_ context.Context) (map[string]kubecmd.NodeStatus, error) {
+    return s.status, s.statusErr
 }

-type stubDynamicInterface struct {
-    data unstructured.Unstructured
-    err  error
+func (s stubKubeClient) GetConstellationVersion(_ context.Context) (kubecmd.NodeVersion, error) {
+    return s.version, s.versionErr
 }

-func (s *stubDynamicInterface) GetCurrent(_ context.Context, _ string) (*unstructured.Unstructured, error) {
-    return &s.data, s.err
-}
-
-func (s *stubDynamicInterface) Update(_ context.Context, _ *unstructured.Unstructured) (*unstructured.Unstructured, error) {
-    return &s.data, s.err
+func (s stubKubeClient) GetClusterAttestationConfig(_ context.Context, _ variant.Variant) (config.AttestationCfg, error) {
+    return s.attestation, s.attestationErr
 }

 func stubGetVersions(output string) func() (fmt.Stringer, error) {
@@ -17,7 +17,7 @@ import (
     "github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
     "github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix"
     "github.com/edgelesssys/constellation/v2/cli/internal/helm"
-    "github.com/edgelesssys/constellation/v2/cli/internal/kubernetes"
+    "github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
     "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/edgelesssys/constellation/v2/cli/internal/upgrade"
     "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
@@ -69,12 +69,12 @@ func runUpgradeApply(cmd *cobra.Command, _ []string) error {
     fileHandler := file.NewHandler(afero.NewOsFs())
     upgradeID := generateUpgradeID(upgradeCmdKindApply)

-    kubeUpgrader, err := kubernetes.NewUpgrader(cmd.OutOrStdout(), constants.AdminConfFilename, log)
+    kubeUpgrader, err := kubecmd.New(cmd.OutOrStdout(), constants.AdminConfFilename, log)
     if err != nil {
         return err
     }

-    helmUpgrader, err := helm.NewUpgradeClient(kubectl.New(), constants.UpgradeDir, constants.AdminConfFilename, constants.HelmNamespace, log)
+    helmUpgrader, err := helm.NewUpgradeClient(kubectl.NewUninitialized(), constants.UpgradeDir, constants.AdminConfFilename, constants.HelmNamespace, log)
     if err != nil {
         return fmt.Errorf("setting up helm client: %w", err)
     }
@@ -153,9 +153,10 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command) error {
     }
     conf.UpdateMAAURL(idFile.AttestationURL)

-    if err := u.confirmIfUpgradeAttestConfigHasDiff(cmd, conf.GetAttestationConfig(), flags); err != nil {
+    if err := u.confirmAttestationConfigUpgrade(cmd, conf.GetAttestationConfig(), flags); err != nil {
         return fmt.Errorf("upgrading measurements: %w", err)
     }

+    // not moving existing Terraform migrator because of planned apply refactor
     tfOutput, err := u.migrateTerraform(cmd, conf, flags)
     if err != nil {
@@ -190,7 +191,7 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command) error {

     err = u.kubeUpgrader.UpgradeNodeVersion(cmd.Context(), conf, flags.force)
     switch {
-    case errors.Is(err, kubernetes.ErrInProgress):
+    case errors.Is(err, kubecmd.ErrInProgress):
         cmd.PrintErrln("Skipping image and Kubernetes upgrades. Another upgrade is in progress.")
     case errors.As(err, &upgradeErr):
         cmd.PrintErrln(err)
@@ -337,9 +338,9 @@ func validK8sVersion(cmd *cobra.Command, version string, yes bool) (validVersion
     return validVersion, nil
 }

-// confirmIfUpgradeAttestConfigHasDiff checks if the locally configured measurements are different from the cluster's measurements.
-// If so the function will ask the user to confirm (if --yes is not set).
-func (u *upgradeApplyCmd) confirmIfUpgradeAttestConfigHasDiff(cmd *cobra.Command, newConfig config.AttestationCfg, flags upgradeApplyFlags) error {
+// confirmAttestationConfigUpgrade checks if the locally configured measurements are different from the cluster's measurements.
+// If so the function will ask the user to confirm (if --yes is not set) and upgrade the cluster's config.
+func (u *upgradeApplyCmd) confirmAttestationConfigUpgrade(cmd *cobra.Command, newConfig config.AttestationCfg, flags upgradeApplyFlags) error {
     clusterAttestationConfig, err := u.kubeUpgrader.GetClusterAttestationConfig(cmd.Context(), newConfig.GetVariant())
     if err != nil {
         return fmt.Errorf("getting cluster attestation config: %w", err)
@@ -369,10 +370,7 @@ func (u *upgradeApplyCmd) confirmIfUpgradeAttestConfigHasDiff(cmd *cobra.Command
             return errors.New("aborting upgrade since attestation config is different")
         }
     }
-    // TODO(elchead): move this outside this function to remove the side effect.
-    if err := u.kubeUpgrader.BackupConfigMap(cmd.Context(), constants.JoinConfigMap); err != nil {
-        return fmt.Errorf("backing up join-config: %w", err)
-    }

     if err := u.kubeUpgrader.UpdateAttestationConfig(cmd.Context(), newConfig); err != nil {
         return fmt.Errorf("updating attestation config: %w", err)
     }
@@ -496,8 +494,6 @@ type kubernetesUpgrader interface {
     ExtendClusterConfigCertSANs(ctx context.Context, alternativeNames []string) error
     GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error)
     UpdateAttestationConfig(ctx context.Context, newAttestConfig config.AttestationCfg) error
-    GetMeasurementSalt(ctx context.Context) ([]byte, error)
-    BackupConfigMap(ctx context.Context, name string) error
 }

 type helmUpgrader interface {
@@ -14,7 +14,7 @@ import (

     "github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
     "github.com/edgelesssys/constellation/v2/cli/internal/helm"
-    "github.com/edgelesssys/constellation/v2/cli/internal/kubernetes"
+    "github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
     "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
     "github.com/edgelesssys/constellation/v2/cli/internal/upgrade"
     "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
@@ -32,13 +32,12 @@ import (

 func TestUpgradeApply(t *testing.T) {
     testCases := map[string]struct {
-        helmUpgrader             *stubHelmUpgrader
-        kubeUpgrader             *stubKubernetesUpgrader
-        terraformUpgrader        *stubTerraformUpgrader
-        wantErr                  bool
-        yesFlag                  bool
-        dontWantJoinConfigBackup bool
-        stdin                    string
+        helmUpgrader      *stubHelmUpgrader
+        kubeUpgrader      *stubKubernetesUpgrader
+        terraformUpgrader *stubTerraformUpgrader
+        wantErr           bool
+        yesFlag           bool
+        stdin             string
     }{
         "success": {
             kubeUpgrader: &stubKubernetesUpgrader{currentConfig: config.DefaultForAzureSEVSNP()},
@@ -59,7 +58,7 @@ func TestUpgradeApply(t *testing.T) {
         "nodeVersion in progress error": {
             kubeUpgrader: &stubKubernetesUpgrader{
                 currentConfig: config.DefaultForAzureSEVSNP(),
-                nodeVersionErr: kubernetes.ErrInProgress,
+                nodeVersionErr: kubecmd.ErrInProgress,
             },
             helmUpgrader:      &stubHelmUpgrader{},
             terraformUpgrader: &stubTerraformUpgrader{},
@@ -129,10 +128,9 @@ func TestUpgradeApply(t *testing.T) {
             kubeUpgrader: &stubKubernetesUpgrader{
                 currentConfig: fakeAzureAttestationConfigFromCluster(context.Background(), t, cloudprovider.Azure),
             },
-            helmUpgrader:             &stubHelmUpgrader{},
-            terraformUpgrader:        &stubTerraformUpgrader{},
-            yesFlag:                  true,
-            dontWantJoinConfigBackup: true,
+            helmUpgrader:      &stubHelmUpgrader{},
+            terraformUpgrader: &stubTerraformUpgrader{},
+            yesFlag:           true,
         },
     }

@@ -175,7 +173,6 @@ func TestUpgradeApply(t *testing.T) {
                 return
             }
             assert.NoError(err)
-            assert.Equal(!tc.dontWantJoinConfigBackup, tc.kubeUpgrader.backupWasCalled)
         })
     }
 }
@@ -192,18 +189,8 @@ func (u stubHelmUpgrader) Upgrade(
 }

 type stubKubernetesUpgrader struct {
-    backupWasCalled bool
-    nodeVersionErr  error
-    currentConfig   config.AttestationCfg
-}
-
-func (u stubKubernetesUpgrader) GetMeasurementSalt(_ context.Context) ([]byte, error) {
-    return []byte{}, nil
-}
-
-func (u *stubKubernetesUpgrader) BackupConfigMap(_ context.Context, _ string) error {
-    u.backupWasCalled = true
-    return nil
+    nodeVersionErr error
+    currentConfig  config.AttestationCfg
 }

 func (u stubKubernetesUpgrader) UpgradeNodeVersion(_ context.Context, _ *config.Config, _ bool) error {
@@ -19,7 +19,7 @@ import (
     "github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd"
     "github.com/edgelesssys/constellation/v2/cli/internal/featureset"
     "github.com/edgelesssys/constellation/v2/cli/internal/helm"
-    "github.com/edgelesssys/constellation/v2/cli/internal/kubernetes"
+    "github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
     "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
     "github.com/edgelesssys/constellation/v2/cli/internal/upgrade"
     "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
@@ -79,7 +79,7 @@ func runUpgradeCheck(cmd *cobra.Command, _ []string) error {
         return fmt.Errorf("setting up terraform client: %w", err)
     }

-    kubeChecker, err := kubernetes.NewUpgrader(cmd.OutOrStdout(), constants.AdminConfFilename, log)
+    kubeChecker, err := kubecmd.New(cmd.OutOrStdout(), constants.AdminConfFilename, log)
     if err != nil {
         return fmt.Errorf("setting up Kubernetes upgrader: %w", err)
     }
@@ -317,8 +317,8 @@ func filterK8sUpgrades(currentVersion string, newVersions []string) []string {

 type collector interface {
     currentVersions(ctx context.Context) (currentVersionInfo, error)
-    supportedVersions(ctx context.Context, version, currentK8sVersion string) (supportedVersionInfo, error)
-    newImages(ctx context.Context, version string) ([]versionsapi.Version, error)
+    supportedVersions(ctx context.Context, currentImageVersion, currentK8sVersion string) (supportedVersionInfo, error)
+    newImages(ctx context.Context, currentImageVersion string) ([]versionsapi.Version, error)
     newMeasurements(ctx context.Context, csp cloudprovider.Provider, attestationVariant variant.Variant, images []versionsapi.Version) (map[string]measurements.M, error)
     newerVersions(ctx context.Context, allowedVersions []string) ([]versionsapi.Version, error)
     newCLIVersions(ctx context.Context) ([]consemver.Semver, error)
@@ -376,7 +376,7 @@ type currentVersionInfo struct {
 }

 func (v *versionCollector) currentVersions(ctx context.Context) (currentVersionInfo, error) {
-    helmClient, err := helm.NewUpgradeClient(kubectl.New(), constants.UpgradeDir, constants.AdminConfFilename, constants.HelmNamespace, v.log)
+    helmClient, err := helm.NewUpgradeClient(kubectl.NewUninitialized(), constants.UpgradeDir, constants.AdminConfFilename, constants.HelmNamespace, v.log)
     if err != nil {
         return currentVersionInfo{}, fmt.Errorf("setting up helm client: %w", err)
     }
@@ -386,20 +386,21 @@ func (v *versionCollector) currentVersions(ctx context.Context) (currentVersionI
         return currentVersionInfo{}, fmt.Errorf("getting service versions: %w", err)
     }

-    imageVersion, err := getCurrentImageVersion(ctx, v.kubeChecker)
+    clusterVersions, err := v.kubeChecker.GetConstellationVersion(ctx)
     if err != nil {
-        return currentVersionInfo{}, fmt.Errorf("getting image version: %w", err)
+        return currentVersionInfo{}, fmt.Errorf("getting cluster versions: %w", err)
     }
-
-    k8sVersion, err := getCurrentKubernetesVersion(ctx, v.kubeChecker)
-    if err != nil {
-        return currentVersionInfo{}, fmt.Errorf("getting Kubernetes version: %w", err)
+    if !semver.IsValid(clusterVersions.ImageVersion()) {
+        return currentVersionInfo{}, fmt.Errorf("checking image for valid semantic version: %w", err)
+    }
+    if !semver.IsValid(clusterVersions.KubernetesVersion()) {
+        return currentVersionInfo{}, fmt.Errorf("checking Kubernetes for valid semantic version: %w", err)
     }

     return currentVersionInfo{
         service: serviceVersions.ConstellationServices(),
-        image:   imageVersion,
-        k8s:     k8sVersion,
+        image:   clusterVersions.ImageVersion(),
+        k8s:     clusterVersions.KubernetesVersion(),
         cli:     v.cliVersion,
     }, nil
 }
@@ -415,10 +416,10 @@ type supportedVersionInfo struct {
 }

 // supportedVersions returns slices of supported versions.
-func (v *versionCollector) supportedVersions(ctx context.Context, version, currentK8sVersion string) (supportedVersionInfo, error) {
+func (v *versionCollector) supportedVersions(ctx context.Context, currentImageVersion, currentK8sVersion string) (supportedVersionInfo, error) {
     k8sVersions := versions.SupportedK8sVersions()

-    imageVersions, err := v.newImages(ctx, version)
+    imageVersions, err := v.newImages(ctx, currentImageVersion)
     if err != nil {
         return supportedVersionInfo{}, fmt.Errorf("loading image versions: %w", err)
     }
@@ -441,13 +442,13 @@
     }, nil
 }

-func (v *versionCollector) newImages(ctx context.Context, version string) ([]versionsapi.Version, error) {
+func (v *versionCollector) newImages(ctx context.Context, currentImageVersion string) ([]versionsapi.Version, error) {
     // find compatible images
     // image updates should always be possible for the current minor version of the cluster
     // (e.g. 0.1.0 -> 0.1.1, 0.1.2, 0.1.3, etc.)
     // additionally, we allow updates to the next minor version (e.g. 0.1.0 -> 0.2.0)
     // if the CLI minor version is newer than the cluster minor version
-    currentImageMinorVer := semver.MajorMinor(version)
+    currentImageMinorVer := semver.MajorMinor(currentImageVersion)
     currentCLIMinorVer := semver.MajorMinor(v.cliVersion.String())
     nextImageMinorVer, err := compatibility.NextMinorVersion(currentImageMinorVer)
     if err != nil {
@@ -590,35 +591,6 @@ func (v *versionUpgrade) writeConfig(conf *config.Config, fileHandler file.Handl
     return fileHandler.WriteYAML(configPath, conf, file.OptOverwrite)
 }

-// getCurrentImageVersion retrieves the semantic version of the image currently installed in the cluster.
-// If the cluster is not using a release image, an error is returned.
-func getCurrentImageVersion(ctx context.Context, checker kubernetesChecker) (string, error) {
-    imageVersion, err := checker.CurrentImage(ctx)
-    if err != nil {
-        return "", err
-    }
-
-    if !semver.IsValid(imageVersion) {
-        return "", fmt.Errorf("current image version is not a release image version: %q", imageVersion)
-    }
-
-    return imageVersion, nil
-}
-
-// getCurrentKubernetesVersion retrieves the semantic version of Kubernetes currently installed in the cluster.
-func getCurrentKubernetesVersion(ctx context.Context, checker kubernetesChecker) (string, error) {
-    k8sVersion, err := checker.CurrentKubernetesVersion(ctx)
-    if err != nil {
-        return "", err
-    }
-
-    if !semver.IsValid(k8sVersion) {
-        return "", fmt.Errorf("current kubernetes version is not a valid semver string: %q", k8sVersion)
-    }
-
-    return k8sVersion, nil
-}
-
 // getCompatibleImageMeasurements retrieves the expected measurements for each image.
 func getCompatibleImageMeasurements(ctx context.Context, writer io.Writer, client *http.Client, cosign sigstore.Verifier, rekor rekorVerifier,
     csp cloudprovider.Provider, attestationVariant variant.Variant, version versionsapi.Version, log debugLog,
@@ -711,7 +683,8 @@ func (v *versionCollector) newCLIVersions(ctx context.Context) ([]consemver.Semv
 }

 // filterCompatibleCLIVersions filters a list of CLI versions which are compatible with the current Kubernetes version.
-func (v *versionCollector) filterCompatibleCLIVersions(ctx context.Context, cliPatchVersions []consemver.Semver, currentK8sVersion string) ([]consemver.Semver, error) {
+func (v *versionCollector) filterCompatibleCLIVersions(ctx context.Context, cliPatchVersions []consemver.Semver, currentK8sVersion string,
+) ([]consemver.Semver, error) {
     // filter out invalid upgrades and versions which are not compatible with the current Kubernetes version
     var compatibleVersions []consemver.Semver
     for _, version := range cliPatchVersions {
@@ -750,8 +723,7 @@ type upgradeCheckFlags struct {
 }

 type kubernetesChecker interface {
-    CurrentImage(ctx context.Context) (string, error)
-    CurrentKubernetesVersion(ctx context.Context) (string, error)
+    GetConstellationVersion(ctx context.Context) (kubecmd.NodeVersion, error)
 }

 type terraformChecker interface {
@@ -28,7 +28,6 @@ import (
     "github.com/spf13/afero"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
-    "golang.org/x/mod/semver"
 )

 // TestBuildString checks that the resulting user output is as expected. Slow part is the Sscanf in parseCanonicalSemver().
@@ -108,46 +107,6 @@ func TestBuildString(t *testing.T) {
     }
 }

-func TestGetCurrentImageVersion(t *testing.T) {
-    testCases := map[string]struct {
-        stubKubernetesChecker stubKubernetesChecker
-        wantErr               bool
-    }{
-        "valid version": {
-            stubKubernetesChecker: stubKubernetesChecker{
-                image: "v1.0.0",
-            },
-        },
-        "invalid version": {
-            stubKubernetesChecker: stubKubernetesChecker{
-                image: "invalid",
-            },
-            wantErr: true,
-        },
-        "GetCurrentImage error": {
-            stubKubernetesChecker: stubKubernetesChecker{
-                err: errors.New("error"),
-            },
-            wantErr: true,
-        },
-    }
-
-    for name, tc := range testCases {
-        t.Run(name, func(t *testing.T) {
-            assert := assert.New(t)
-
-            version, err := getCurrentImageVersion(context.Background(), tc.stubKubernetesChecker)
-            if tc.wantErr {
-                assert.Error(err)
-                return
-            }
-
-            assert.NoError(err)
-            assert.True(semver.IsValid(version))
-        })
-    }
-}
-
 func TestGetCompatibleImageMeasurements(t *testing.T) {
     assert := assert.New(t)
     require := require.New(t)
@@ -317,20 +276,6 @@ func (s *stubVersionCollector) filterCompatibleCLIVersions(_ context.Context, _
     return s.newCompatibleCLIVersionsList, nil
 }

-type stubKubernetesChecker struct {
-    image      string
-    k8sVersion string
-    err        error
-}
-
-func (s stubKubernetesChecker) CurrentImage(context.Context) (string, error) {
-    return s.image, s.err
-}
-
-func (s stubKubernetesChecker) CurrentKubernetesVersion(context.Context) (string, error) {
-    return s.k8sVersion, s.err
-}
-
 type stubTerraformChecker struct {
     tfDiff bool
     err    error
@@ -19,7 +19,7 @@ import (

 func (c *UpgradeClient) backupCRDs(ctx context.Context, upgradeID string) ([]apiextensionsv1.CustomResourceDefinition, error) {
     c.log.Debugf("Starting CRD backup")
-    crds, err := c.kubectl.GetCRDs(ctx)
+    crds, err := c.kubectl.ListCRDs(ctx)
     if err != nil {
         return nil, fmt.Errorf("getting CRDs: %w", err)
     }
@@ -66,7 +66,7 @@ func (c *UpgradeClient) backupCRs(ctx context.Context, crds []apiextensionsv1.Cu
         c.log.Debugf("Creating backup of CRs for %q at version %q", crd.Name, version.Name)

         gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: version.Name, Resource: crd.Spec.Names.Plural}
-        crs, err := c.kubectl.GetCRs(ctx, gvr)
+        crs, err := c.kubectl.ListCRs(ctx, gvr)
         if err != nil {
             if !k8serrors.IsNotFound(err) {
                 return fmt.Errorf("retrieving CR %s: %w", crd.Name, err)
@@ -181,14 +181,14 @@ type stubCrdClient struct {
     crdClient
 }

-func (c stubCrdClient) GetCRDs(_ context.Context) ([]apiextensionsv1.CustomResourceDefinition, error) {
+func (c stubCrdClient) ListCRDs(_ context.Context) ([]apiextensionsv1.CustomResourceDefinition, error) {
     if c.getCRDsError != nil {
         return nil, c.getCRDsError
     }
     return c.crds, nil
 }

-func (c stubCrdClient) GetCRs(_ context.Context, _ schema.GroupVersionResource) ([]unstructured.Unstructured, error) {
+func (c stubCrdClient) ListCRs(_ context.Context, _ schema.GroupVersionResource) ([]unstructured.Unstructured, error) {
     if c.getCRsError != nil {
         return nil, c.getCRsError
     }
@@ -363,8 +363,8 @@ func (c *UpgradeClient) updateCRDs(ctx context.Context, chart *chart.Chart) erro
 type crdClient interface {
     Initialize(kubeconfig []byte) error
     ApplyCRD(ctx context.Context, rawCRD []byte) error
-    GetCRDs(ctx context.Context) ([]apiextensionsv1.CustomResourceDefinition, error)
-    GetCRs(ctx context.Context, gvr schema.GroupVersionResource) ([]unstructured.Unstructured, error)
+    ListCRDs(ctx context.Context) ([]apiextensionsv1.CustomResourceDefinition, error)
+    ListCRs(ctx context.Context, gvr schema.GroupVersionResource) ([]unstructured.Unstructured, error)
 }

 type actionWrapper interface {
@@ -2,13 +2,12 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
 load("//bazel/go:go_test.bzl", "go_test")

 go_library(
-    name = "kubernetes",
+    name = "kubecmd",
     srcs = [
-        "kubernetes.go",
+        "kubecmd.go",
         "status.go",
         "upgrade.go",
     ],
-    importpath = "github.com/edgelesssys/constellation/v2/cli/internal/kubernetes",
+    importpath = "github.com/edgelesssys/constellation/v2/cli/internal/kubecmd",
     visibility = ["//cli:__subpackages__"],
     deps = [
         "//internal/api/versionsapi",
@@ -19,6 +18,7 @@ go_library(
         "//internal/constants",
         "//internal/imagefetcher",
+        "//internal/kubernetes",
         "//internal/kubernetes/kubectl",
         "//internal/versions",
         "//internal/versions/components",
         "//operators/constellation-node-operator/api/v1alpha1",
@@ -28,9 +28,6 @@ go_library(
         "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured",
         "@io_k8s_apimachinery//pkg/runtime",
         "@io_k8s_apimachinery//pkg/runtime/schema",
-        "@io_k8s_client_go//dynamic",
-        "@io_k8s_client_go//kubernetes",
-        "@io_k8s_client_go//tools/clientcmd",
         "@io_k8s_client_go//util/retry",
         "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta3",
         "@io_k8s_sigs_yaml//:yaml",
@@ -38,9 +35,9 @@ go_library(
 )

 go_test(
-    name = "kubernetes_test",
-    srcs = ["upgrade_test.go"],
-    embed = [":kubernetes"],
+    name = "kubecmd_test",
+    srcs = ["kubecmd_test.go"],
+    embed = [":kubecmd"],
     deps = [
         "//internal/attestation/variant",
         "//internal/cloud/cloudprovider",
cli/internal/kubecmd/kubecmd.go (new file, 451 lines)
@@ -0,0 +1,451 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

/*
Package kubecmd provides functions to interact with a Kubernetes cluster to the CLI.
The package should be used for:

  - Fetching status information about the cluster
  - Creating, deleting, or migrating resources not managed by Helm

The package should not be used for anything that doesn't just require the Kubernetes API.
For example, Terraform and Helm actions should not be accessed by this package.
*/
package kubecmd

import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "sort"
    "strings"

    "github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
    "github.com/edgelesssys/constellation/v2/internal/attestation/variant"
    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
    "github.com/edgelesssys/constellation/v2/internal/compatibility"
    "github.com/edgelesssys/constellation/v2/internal/config"
    "github.com/edgelesssys/constellation/v2/internal/constants"
    "github.com/edgelesssys/constellation/v2/internal/imagefetcher"
    internalk8s "github.com/edgelesssys/constellation/v2/internal/kubernetes"
    "github.com/edgelesssys/constellation/v2/internal/kubernetes/kubectl"
    "github.com/edgelesssys/constellation/v2/internal/versions"
    "github.com/edgelesssys/constellation/v2/internal/versions/components"
    updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
    corev1 "k8s.io/api/core/v1"
    k8serrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/util/retry"
    kubeadmv1beta3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
    "sigs.k8s.io/yaml"
)
// ErrInProgress signals that an upgrade is in progress inside the cluster.
var ErrInProgress = errors.New("upgrade in progress")

// InvalidUpgradeError present an invalid upgrade. It wraps the source and destination version for improved debuggability.
type applyError struct {
    expected string
    actual   string
}

// Error returns the String representation of this error.
func (e *applyError) Error() string {
    return fmt.Sprintf("expected NodeVersion to contain %s, got %s", e.expected, e.actual)
}

// KubeCmd handles interaction with the cluster's components using the CLI.
type KubeCmd struct {
    kubectl      kubectlInterface
    imageFetcher imageFetcher
    outWriter    io.Writer
    log          debugLog
}

// New returns a new KubeCmd.
func New(outWriter io.Writer, kubeConfigPath string, log debugLog) (*KubeCmd, error) {
    client, err := kubectl.NewFromConfig(kubeConfigPath)
    if err != nil {
        return nil, fmt.Errorf("creating kubectl client: %w", err)
    }

    return &KubeCmd{
        kubectl:      client,
        imageFetcher: imagefetcher.New(),
        outWriter:    outWriter,
        log:          log,
    }, nil
}
// UpgradeNodeVersion upgrades the cluster's NodeVersion object and in turn triggers image & k8s version upgrades.
// The versions set in the config are validated against the versions running in the cluster.
func (k *KubeCmd) UpgradeNodeVersion(ctx context.Context, conf *config.Config, force bool) error {
	provider := conf.GetProvider()
	attestationVariant := conf.GetAttestationConfig().GetVariant()
	region := conf.GetRegion()
	imageReference, err := k.imageFetcher.FetchReference(ctx, provider, attestationVariant, conf.Image, region)
	if err != nil {
		return fmt.Errorf("fetching image reference: %w", err)
	}

	imageVersion, err := versionsapi.NewVersionFromShortPath(conf.Image, versionsapi.VersionKindImage)
	if err != nil {
		return fmt.Errorf("parsing version from image short path: %w", err)
	}

	nodeVersion, err := k.getConstellationVersion(ctx)
	if err != nil {
		return err
	}

	upgradeErrs := []error{}
	var upgradeErr *compatibility.InvalidUpgradeError

	err = k.isValidImageUpgrade(nodeVersion, imageVersion.Version(), force)
	switch {
	case errors.As(err, &upgradeErr):
		upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping image upgrades: %w", err))
	case err != nil:
		return fmt.Errorf("updating image version: %w", err)
	}
	k.log.Debugf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, imageVersion.Version())
	nodeVersion.Spec.ImageReference = imageReference
	nodeVersion.Spec.ImageVersion = imageVersion.Version()

	// We have to allow users to specify outdated k8s patch versions.
	// Therefore, this code has to skip k8s updates if a user configures an outdated (i.e. invalid) k8s version.
	var components *corev1.ConfigMap
	currentK8sVersion, err := versions.NewValidK8sVersion(conf.KubernetesVersion, true)
	if err != nil {
		innerErr := fmt.Errorf("unsupported Kubernetes version, supported versions are %s", strings.Join(versions.SupportedK8sVersions(), ", "))
		err = compatibility.NewInvalidUpgradeError(nodeVersion.Spec.KubernetesClusterVersion, conf.KubernetesVersion, innerErr)
	} else {
		versionConfig := versions.VersionConfigs[currentK8sVersion]
		components, err = k.updateK8s(&nodeVersion, versionConfig.ClusterVersion, versionConfig.KubernetesComponents, force)
	}

	switch {
	case err == nil:
		err := k.applyComponentsCM(ctx, components)
		if err != nil {
			return fmt.Errorf("applying k8s components ConfigMap: %w", err)
		}
	case errors.As(err, &upgradeErr):
		upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping Kubernetes upgrades: %w", err))
	default:
		return fmt.Errorf("updating Kubernetes version: %w", err)
	}

	// If both the image and the Kubernetes upgrade were skipped, there is nothing to apply.
	if len(upgradeErrs) == 2 {
		return errors.Join(upgradeErrs...)
	}

	updatedNodeVersion, err := k.applyNodeVersion(ctx, nodeVersion)
	if err != nil {
		return fmt.Errorf("applying upgrade: %w", err)
	}

	if err := checkForApplyError(nodeVersion, updatedNodeVersion); err != nil {
		return err
	}
	return errors.Join(upgradeErrs...)
}

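A hedged sketch of how a caller might drive this method; the wrapper name upgradeOnce is hypothetical, and conf is assumed to be a *config.Config already validated by the CLI. A concurrent upgrade (ErrInProgress) is reported but not treated as fatal; anything else is passed up, possibly as joined InvalidUpgradeErrors.

package kubecmdexample // hypothetical caller package inside the cli tree

import (
	"context"
	"errors"
	"fmt"

	"github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
	"github.com/edgelesssys/constellation/v2/internal/config"
)

func upgradeOnce(ctx context.Context, kube *kubecmd.KubeCmd, conf *config.Config) error {
	err := kube.UpgradeNodeVersion(ctx, conf, false)
	if errors.Is(err, kubecmd.ErrInProgress) {
		fmt.Println("an upgrade is already in progress, not starting a new one")
		return nil
	}
	return err // nil on success; may join multiple skipped-upgrade errors
}
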
// ClusterStatus returns a map from node name to NodeStatus.
func (k *KubeCmd) ClusterStatus(ctx context.Context) (map[string]NodeStatus, error) {
	nodes, err := k.kubectl.GetNodes(ctx)
	if err != nil {
		return nil, fmt.Errorf("getting nodes: %w", err)
	}

	clusterStatus := map[string]NodeStatus{}
	for _, node := range nodes {
		clusterStatus[node.ObjectMeta.Name] = NewNodeStatus(node)
	}

	return clusterStatus, nil
}

// GetClusterAttestationConfig fetches the join-config ConfigMap from the cluster
// and returns the attestation config it contains.
func (k *KubeCmd) GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error) {
	existingConf, err := k.kubectl.GetConfigMap(ctx, constants.ConstellationNamespace, constants.JoinConfigMap)
	if err != nil {
		return nil, fmt.Errorf("retrieving current attestation config: %w", err)
	}
	if _, ok := existingConf.Data[constants.AttestationConfigFilename]; !ok {
		return nil, errors.New("attestation config missing from join-config")
	}

	existingAttestationConfig, err := config.UnmarshalAttestationConfig([]byte(existingConf.Data[constants.AttestationConfigFilename]), variant)
	if err != nil {
		return nil, fmt.Errorf("retrieving current attestation config: %w", err)
	}

	return existingAttestationConfig, nil
}

// UpdateAttestationConfig updates the Constellation cluster's attestation config.
// A backup of the previous config is created before updating.
func (k *KubeCmd) UpdateAttestationConfig(ctx context.Context, newAttestConfig config.AttestationCfg) error {
	// backup of previous measurements
	joinConfig, err := k.kubectl.GetConfigMap(ctx, constants.ConstellationNamespace, constants.JoinConfigMap)
	if err != nil {
		return fmt.Errorf("getting %s ConfigMap: %w", constants.JoinConfigMap, err)
	}

	// create backup of previous config
	backup := joinConfig.DeepCopy()
	backup.ObjectMeta = metav1.ObjectMeta{}
	backup.Name = fmt.Sprintf("%s-backup", constants.JoinConfigMap)
	if err := k.applyConfigMap(ctx, backup); err != nil {
		return fmt.Errorf("creating backup of join-config ConfigMap: %w", err)
	}
	k.log.Debugf("Created backup of %s ConfigMap %q in namespace %q", constants.JoinConfigMap, backup.Name, backup.Namespace)

	newConfigJSON, err := json.Marshal(newAttestConfig)
	if err != nil {
		return fmt.Errorf("marshaling attestation config: %w", err)
	}
	joinConfig.Data[constants.AttestationConfigFilename] = string(newConfigJSON)
	k.log.Debugf("Triggering attestation config update now")
	if _, err = k.kubectl.UpdateConfigMap(ctx, joinConfig); err != nil {
		return fmt.Errorf("setting new attestation config: %w", err)
	}

	fmt.Fprintln(k.outWriter, "Successfully updated the cluster's attestation config")
	return nil
}

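A sketch of the read-modify-write flow these two methods enable, assuming an Azure SEV-SNP cluster; variant.AzureSEVSNP and config.DefaultForAzureSEVSNP are the variant type and default-config helper used elsewhere in this repo, while the helper name bumpAttestationConfig is hypothetical.

package kubecmdexample // hypothetical caller package inside the cli tree

import (
	"context"

	"github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
	"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
	"github.com/edgelesssys/constellation/v2/internal/config"
)

func bumpAttestationConfig(ctx context.Context, kube *kubecmd.KubeCmd) error {
	// Fetch the config currently enforced by the cluster.
	current, err := kube.GetClusterAttestationConfig(ctx, variant.AzureSEVSNP{})
	if err != nil {
		return err
	}
	_ = current // a real caller would diff this against the new config first

	// Writing a new config first creates a join-config-backup ConfigMap.
	return kube.UpdateAttestationConfig(ctx, config.DefaultForAzureSEVSNP())
}
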
// ExtendClusterConfigCertSANs extends the ClusterConfig stored under "kube-system/kubeadm-config" with the given SANs.
// Existing SANs are preserved.
func (k *KubeCmd) ExtendClusterConfigCertSANs(ctx context.Context, alternativeNames []string) error {
	clusterConfiguration, kubeadmConfig, err := k.getClusterConfiguration(ctx)
	if err != nil {
		return fmt.Errorf("getting ClusterConfig: %w", err)
	}

	existingSANs := make(map[string]struct{})
	for _, existingSAN := range clusterConfiguration.APIServer.CertSANs {
		existingSANs[existingSAN] = struct{}{}
	}

	var missingSANs []string
	for _, san := range alternativeNames {
		if _, ok := existingSANs[san]; !ok {
			missingSANs = append(missingSANs, san)
		}
	}

	if len(missingSANs) == 0 {
		return nil
	}
	k.log.Debugf("Extending the cluster's apiserver SAN field with the following SANs: %s\n", strings.Join(missingSANs, ", "))

	clusterConfiguration.APIServer.CertSANs = append(clusterConfiguration.APIServer.CertSANs, missingSANs...)
	sort.Strings(clusterConfiguration.APIServer.CertSANs)

	newConfigYAML, err := yaml.Marshal(clusterConfiguration)
	if err != nil {
		return fmt.Errorf("marshaling ClusterConfiguration: %w", err)
	}

	kubeadmConfig.Data[constants.ClusterConfigurationKey] = string(newConfigYAML)
	k.log.Debugf("Triggering kubeadm config update now")
	if _, err = k.kubectl.UpdateConfigMap(ctx, kubeadmConfig); err != nil {
		return fmt.Errorf("setting new kubeadm config: %w", err)
	}

	fmt.Fprintln(k.outWriter, "Successfully extended the cluster's apiserver SAN field")
	return nil
}

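For illustration, a call with one DNS name and one IP address (both example values); names already present in the ClusterConfiguration are silently skipped, so the call is idempotent.

package kubecmdexample // hypothetical caller package inside the cli tree

import (
	"context"

	"github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
)

func addSANs(ctx context.Context, kube *kubecmd.KubeCmd) error {
	// Re-running with the same names results in no ConfigMap update.
	return kube.ExtendClusterConfigCertSANs(ctx, []string{"cluster.example.com", "203.0.113.10"})
}
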
// GetConstellationVersion retrieves the Kubernetes and image version of a Constellation cluster,
// as well as the Kubernetes components reference and image reference string.
func (k *KubeCmd) GetConstellationVersion(ctx context.Context) (NodeVersion, error) {
	nV, err := k.getConstellationVersion(ctx)
	if err != nil {
		return NodeVersion{}, fmt.Errorf("retrieving Constellation version: %w", err)
	}

	return NewNodeVersion(nV)
}

// getConstellationVersion returns the NodeVersion object of a Constellation cluster.
func (k *KubeCmd) getConstellationVersion(ctx context.Context) (updatev1alpha1.NodeVersion, error) {
	raw, err := k.kubectl.GetCR(ctx, schema.GroupVersionResource{
		Group:    "update.edgeless.systems",
		Version:  "v1alpha1",
		Resource: "nodeversions",
	}, "constellation-version")
	if err != nil {
		return updatev1alpha1.NodeVersion{}, err
	}
	var nodeVersion updatev1alpha1.NodeVersion
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(raw.UnstructuredContent(), &nodeVersion); err != nil {
		return updatev1alpha1.NodeVersion{}, fmt.Errorf("converting unstructured to NodeVersion: %w", err)
	}

	return nodeVersion, nil
}

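A short sketch of reading these values from a caller's perspective; the helper name printVersions is hypothetical, and NodeVersion's accessors are defined in status.go further below.

package kubecmdexample // hypothetical caller package inside the cli tree

import (
	"context"
	"fmt"

	"github.com/edgelesssys/constellation/v2/cli/internal/kubecmd"
)

func printVersions(ctx context.Context, kube *kubecmd.KubeCmd) error {
	nv, err := kube.GetConstellationVersion(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("image %s (%s), Kubernetes %s, status: %s\n",
		nv.ImageVersion(), nv.ImageReference(), nv.KubernetesVersion(), nv.ClusterStatus())
	return nil
}
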
// getClusterConfiguration fetches the kubeadm-config configmap from the cluster, extracts the config
// and returns both the full configmap and the ClusterConfiguration.
func (k *KubeCmd) getClusterConfiguration(ctx context.Context) (kubeadmv1beta3.ClusterConfiguration, *corev1.ConfigMap, error) {
	existingConf, err := k.kubectl.GetConfigMap(ctx, constants.ConstellationNamespace, constants.KubeadmConfigMap)
	if err != nil {
		return kubeadmv1beta3.ClusterConfiguration{}, nil, fmt.Errorf("retrieving current kubeadm-config: %w", err)
	}
	clusterConf, ok := existingConf.Data[constants.ClusterConfigurationKey]
	if !ok {
		return kubeadmv1beta3.ClusterConfiguration{}, nil, errors.New("ClusterConfiguration missing from kubeadm-config")
	}

	var existingClusterConfig kubeadmv1beta3.ClusterConfiguration
	if err := yaml.Unmarshal([]byte(clusterConf), &existingClusterConfig); err != nil {
		return kubeadmv1beta3.ClusterConfiguration{}, nil, fmt.Errorf("unmarshaling ClusterConfiguration: %w", err)
	}

	return existingClusterConfig, existingConf, nil
}

// applyComponentsCM applies the k8s components ConfigMap to the cluster.
func (k *KubeCmd) applyComponentsCM(ctx context.Context, components *corev1.ConfigMap) error {
	// If the map already exists we can use that map and assume it has the same content as 'components'.
	if err := k.kubectl.CreateConfigMap(ctx, components); err != nil && !k8serrors.IsAlreadyExists(err) {
		return fmt.Errorf("creating k8s-components ConfigMap: %w. %T", err, err)
	}
	return nil
}

func (k *KubeCmd) applyNodeVersion(ctx context.Context, nodeVersion updatev1alpha1.NodeVersion) (updatev1alpha1.NodeVersion, error) {
	k.log.Debugf("Triggering NodeVersion upgrade now")
	var updatedNodeVersion updatev1alpha1.NodeVersion
	// Optimistic concurrency: if another writer updates the CR in the meantime,
	// re-fetch the latest object, re-apply our changes, and retry the update.
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		newNode, err := k.getConstellationVersion(ctx)
		if err != nil {
			return fmt.Errorf("retrieving current NodeVersion: %w", err)
		}

		updateNodeVersions(nodeVersion, &newNode)

		raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&newNode)
		if err != nil {
			return fmt.Errorf("converting nodeVersion to unstructured: %w", err)
		}
		updated, err := k.kubectl.UpdateCR(ctx, schema.GroupVersionResource{
			Group:    "update.edgeless.systems",
			Version:  "v1alpha1",
			Resource: "nodeversions",
		}, &unstructured.Unstructured{Object: raw})
		if err != nil {
			return err
		}

		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(updated.UnstructuredContent(), &updatedNodeVersion); err != nil {
			return fmt.Errorf("converting unstructured to NodeVersion: %w", err)
		}
		return nil
	})

	return updatedNodeVersion, err
}

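To make the conflict-retry behavior above concrete, here is a small self-contained sketch (not from this commit) of the same client-go idiom; the simulated 409 Conflict uses the same NewConflict call that the tests further below use.

package main

import (
	"fmt"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/util/retry"
)

func main() {
	attempts := 0
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		attempts++
		if attempts == 1 {
			// Simulate a concurrent writer winning the first update round.
			return k8serrors.NewConflict(schema.GroupResource{Resource: "nodeversions"}, "constellation-version", nil)
		}
		return nil // the second attempt succeeds
	})
	fmt.Printf("update succeeded after %d attempts (err: %v)\n", attempts, err)
}
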
// isValidImageUpgrade checks that the new image version is a valid upgrade and that no upgrade is already running.
func (k *KubeCmd) isValidImageUpgrade(nodeVersion updatev1alpha1.NodeVersion, newImageVersion string, force bool) error {
	if !force {
		// check if an image update is already in progress
		if nodeVersion.Status.ActiveClusterVersionUpgrade {
			return ErrInProgress
		}
		for _, condition := range nodeVersion.Status.Conditions {
			if condition.Type == updatev1alpha1.ConditionOutdated && condition.Status == metav1.ConditionTrue {
				return ErrInProgress
			}
		}

		// check if the image upgrade is valid for the current version
		if err := compatibility.IsValidUpgrade(nodeVersion.Spec.ImageVersion, newImageVersion); err != nil {
			return fmt.Errorf("validating image update: %w", err)
		}
	}
	return nil
}

func (k *KubeCmd) updateK8s(nodeVersion *updatev1alpha1.NodeVersion, newClusterVersion string, components components.Components, force bool) (*corev1.ConfigMap, error) {
	configMap, err := internalk8s.ConstructK8sComponentsCM(components, newClusterVersion)
	if err != nil {
		return nil, fmt.Errorf("constructing k8s-components ConfigMap: %w", err)
	}
	if !force {
		if err := compatibility.IsValidUpgrade(nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion); err != nil {
			return nil, err
		}
	}

	k.log.Debugf("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)
	nodeVersion.Spec.KubernetesComponentsReference = configMap.ObjectMeta.Name
	nodeVersion.Spec.KubernetesClusterVersion = newClusterVersion

	return &configMap, nil
}

// applyConfigMap applies the ConfigMap by creating it if it doesn't exist, or updating it if it does.
func (k *KubeCmd) applyConfigMap(ctx context.Context, configMap *corev1.ConfigMap) error {
	if err := k.kubectl.CreateConfigMap(ctx, configMap); err != nil {
		if !k8serrors.IsAlreadyExists(err) {
			return fmt.Errorf("creating backup config map: %w", err)
		}
		if _, err := k.kubectl.UpdateConfigMap(ctx, configMap); err != nil {
			return fmt.Errorf("updating backup config map: %w", err)
		}
	}
	return nil
}

func checkForApplyError(expected, actual updatev1alpha1.NodeVersion) error {
	var err error
	switch {
	case actual.Spec.ImageReference != expected.Spec.ImageReference:
		err = &applyError{expected: expected.Spec.ImageReference, actual: actual.Spec.ImageReference}
	case actual.Spec.ImageVersion != expected.Spec.ImageVersion:
		err = &applyError{expected: expected.Spec.ImageVersion, actual: actual.Spec.ImageVersion}
	case actual.Spec.KubernetesComponentsReference != expected.Spec.KubernetesComponentsReference:
		err = &applyError{expected: expected.Spec.KubernetesComponentsReference, actual: actual.Spec.KubernetesComponentsReference}
	case actual.Spec.KubernetesClusterVersion != expected.Spec.KubernetesClusterVersion:
		err = &applyError{expected: expected.Spec.KubernetesClusterVersion, actual: actual.Spec.KubernetesClusterVersion}
	}
	return err
}

// kubectlInterface provides access to the Kubernetes API.
type kubectlInterface interface {
	GetNodes(ctx context.Context) ([]corev1.Node, error)
	GetConfigMap(ctx context.Context, namespace, name string) (*corev1.ConfigMap, error)
	UpdateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error)
	CreateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) error
	KubernetesVersion() (string, error)
	GetCR(ctx context.Context, gvr schema.GroupVersionResource, name string) (*unstructured.Unstructured, error)
	UpdateCR(ctx context.Context, gvr schema.GroupVersionResource, obj *unstructured.Unstructured) (*unstructured.Unstructured, error)
}

type debugLog interface {
	Debugf(format string, args ...any)
	Sync()
}

// imageFetcher gets an image reference from the versionsapi.
type imageFetcher interface {
	FetchReference(ctx context.Context,
		provider cloudprovider.Provider, attestationVariant variant.Variant,
		image, region string,
	) (string, error)
}

@ -4,10 +4,11 @@ Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/

package kubernetes
package kubecmd

import (
	"context"
	"encoding/json"
	"errors"
	"io"
	"testing"
@ -34,9 +35,8 @@ import (
)

func TestUpgradeNodeVersion(t *testing.T) {
	someErr := errors.New("some error")
	testCases := map[string]struct {
		stable *fakeStableClient
		kubectl *stubKubectl
		conditions []metav1.Condition
		currentImageVersion string
		newImageReference string
@ -44,11 +44,11 @@ func TestUpgradeNodeVersion(t *testing.T) {
		currentClusterVersion string
		conf *config.Config
		force bool
		getErr error
		getCRErr error
		wantErr bool
		wantUpdate bool
		assertCorrectError func(t *testing.T, err error) bool
		customClientFn func(nodeVersion updatev1alpha1.NodeVersion) DynamicInterface
		customClientFn func(nodeVersion updatev1alpha1.NodeVersion) unstructuredInterface
	}{
		"success": {
			conf: func() *config.Config {
@ -59,7 +59,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
			}(),
			currentImageVersion: "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable: &fakeStableClient{
			kubectl: &stubKubectl{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
				},
@ -75,7 +75,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
			}(),
			currentImageVersion: "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable: &fakeStableClient{
			kubectl: &stubKubectl{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
				},
@ -96,7 +96,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
			}(),
			currentImageVersion: "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable: &fakeStableClient{
			kubectl: &stubKubectl{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
				},
@ -117,7 +117,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
			}(),
			currentImageVersion: "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable: &fakeStableClient{},
			kubectl: &stubKubectl{},
			wantErr: true,
			assertCorrectError: func(t *testing.T, err error) bool {
				var upgradeErr *compatibility.InvalidUpgradeError
@ -137,7 +137,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
			}},
			currentImageVersion: "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable: &fakeStableClient{},
			kubectl: &stubKubectl{},
			wantErr: true,
			assertCorrectError: func(t *testing.T, err error) bool {
				return assert.ErrorIs(t, err, ErrInProgress)
@ -156,7 +156,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
			}},
			currentImageVersion: "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable: &fakeStableClient{},
			kubectl: &stubKubectl{},
			force: true,
			wantUpdate: true,
		},
@ -164,12 +164,20 @@ func TestUpgradeNodeVersion(t *testing.T) {
			conf: func() *config.Config {
				conf := config.Default()
				conf.Image = "v1.2.3"
				conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
				return conf
			}(),
			getErr: someErr,
			wantErr: true,
			currentImageVersion: "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			kubectl: &stubKubectl{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
				},
			},
			getCRErr: assert.AnError,
			wantErr: true,
			assertCorrectError: func(t *testing.T, err error) bool {
				return assert.ErrorIs(t, err, someErr)
				return assert.ErrorIs(t, err, assert.AnError)
			},
		},
		"image too new valid k8s": {
@ -182,7 +190,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
			newImageReference: "path/to/image:v1.4.2",
			currentImageVersion: "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable: &fakeStableClient{
			kubectl: &stubKubectl{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":true}}`),
				},
@ -204,7 +212,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
			newImageReference: "path/to/image:v1.4.2",
			currentImageVersion: "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable: &fakeStableClient{
			kubectl: &stubKubectl{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
				},
@ -222,7 +230,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
			currentImageVersion: "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			badImageVersion: "v3.2.1",
			stable: &fakeStableClient{
			kubectl: &stubKubectl{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
				},
@ -243,7 +251,7 @@ func TestUpgradeNodeVersion(t *testing.T) {
			}(),
			currentImageVersion: "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable: &fakeStableClient{
			kubectl: &stubKubectl{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
				},
@ -264,17 +272,17 @@ func TestUpgradeNodeVersion(t *testing.T) {
			}(),
			currentImageVersion: "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable: &fakeStableClient{
			kubectl: &stubKubectl{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
				},
			},
			wantUpdate: false, // because customClient is used
			customClientFn: func(nodeVersion updatev1alpha1.NodeVersion) DynamicInterface {
				fakeClient := &fakeDynamicClient{}
				fakeClient.On("GetCurrent", mock.Anything, mock.Anything).Return(unstructedObjectWithGeneration(nodeVersion, 1), nil)
				fakeClient.On("Update", mock.Anything, mock.Anything).Return(nil, kerrors.NewConflict(schema.GroupResource{Resource: nodeVersion.Name}, nodeVersion.Name, nil)).Once()
				fakeClient.On("Update", mock.Anything, mock.Anything).Return(unstructedObjectWithGeneration(nodeVersion, 2), nil).Once()
			customClientFn: func(nodeVersion updatev1alpha1.NodeVersion) unstructuredInterface {
				fakeClient := &fakeUnstructuredClient{}
				fakeClient.On("GetCR", mock.Anything, mock.Anything).Return(unstructedObjectWithGeneration(nodeVersion, 1), nil)
				fakeClient.On("UpdateCR", mock.Anything, mock.Anything).Return(nil, kerrors.NewConflict(schema.GroupResource{Resource: nodeVersion.Name}, nodeVersion.Name, nil)).Once()
				fakeClient.On("UpdateCR", mock.Anything, mock.Anything).Return(unstructedObjectWithGeneration(nodeVersion, 2), nil).Once()
				return fakeClient
			},
		},
@ -305,24 +313,29 @@ func TestUpgradeNodeVersion(t *testing.T) {

			unstrNodeVersion, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
			require.NoError(err)
			dynamicClient := &stubDynamicClient{object: &unstructured.Unstructured{Object: unstrNodeVersion}, badUpdatedObject: badUpdatedObject, getErr: tc.getErr}
			upgrader := Upgrader{
				stableInterface: tc.stable,
				dynamicInterface: dynamicClient,
				imageFetcher: &stubImageFetcher{reference: tc.newImageReference},
				log: logger.NewTest(t),
				outWriter: io.Discard,
			unstructuredClient := &stubUnstructuredClient{
				object: &unstructured.Unstructured{Object: unstrNodeVersion},
				badUpdatedObject: badUpdatedObject,
				getCRErr: tc.getCRErr,
			}
			tc.kubectl.unstructuredInterface = unstructuredClient
			if tc.customClientFn != nil {
				upgrader.dynamicInterface = tc.customClientFn(nodeVersion)
				tc.kubectl.unstructuredInterface = tc.customClientFn(nodeVersion)
			}

			upgrader := KubeCmd{
				kubectl: tc.kubectl,
				imageFetcher: &stubImageFetcher{reference: tc.newImageReference},
				log: logger.NewTest(t),
				outWriter: io.Discard,
			}

			err = upgrader.UpgradeNodeVersion(context.Background(), tc.conf, tc.force)
			// Check upgrades first because if we checked err first, UpgradeImage may error due to other reasons and still trigger an upgrade.
			if tc.wantUpdate {
				assert.NotNil(dynamicClient.updatedObject)
				assert.NotNil(unstructuredClient.updatedObject)
			} else {
				assert.Nil(dynamicClient.updatedObject)
				assert.Nil(unstructuredClient.updatedObject)
			}

			if tc.wantErr {
@ -368,7 +381,7 @@ func TestUpdateImage(t *testing.T) {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			upgrader := &Upgrader{
			upgrader := &KubeCmd{
				log: logger.NewTest(t),
			}

@ -379,21 +392,13 @@ func TestUpdateImage(t *testing.T) {
				},
			}

			err := upgrader.updateImage(&nodeVersion, tc.newImageReference, tc.newImageVersion, false)
			err := upgrader.isValidImageUpgrade(nodeVersion, tc.newImageVersion, false)

			if tc.wantErr {
				assert.Error(err)
				return
			}

			assert.NoError(err)
			if tc.wantUpdate {
				assert.Equal(tc.newImageReference, nodeVersion.Spec.ImageReference)
				assert.Equal(tc.newImageVersion, nodeVersion.Spec.ImageVersion)
			} else {
				assert.Equal(tc.oldImageReference, nodeVersion.Spec.ImageReference)
				assert.Equal(tc.oldImageVersion, nodeVersion.Spec.ImageVersion)
			}
		})
	}
}
@ -427,7 +432,7 @@ func TestUpdateK8s(t *testing.T) {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			upgrader := &Upgrader{
			upgrader := &KubeCmd{
				log: logger.NewTest(t),
			}

@ -465,16 +470,65 @@ func newJoinConfigMap(data string) *corev1.ConfigMap {
	}
}

type fakeDynamicClient struct {
func TestUpdateAttestationConfig(t *testing.T) {
	mustMarshal := func(cfg config.AttestationCfg) string {
		data, err := json.Marshal(cfg)
		require.NoError(t, err)
		return string(data)
	}

	testCases := map[string]struct {
		newAttestationCfg config.AttestationCfg
		kubectl *stubKubectl
		wantErr bool
	}{
		"success": {
			newAttestationCfg: config.DefaultForAzureSEVSNP(),
			kubectl: &stubKubectl{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(mustMarshal(config.DefaultForAzureSEVSNP())),
				},
			},
		},
		"error getting ConfigMap": {
			newAttestationCfg: config.DefaultForAzureSEVSNP(),
			kubectl: &stubKubectl{
				getCMErr: assert.AnError,
			},
			wantErr: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			cmd := &KubeCmd{
				kubectl: tc.kubectl,
				log: logger.NewTest(t),
				outWriter: io.Discard,
			}

			err := cmd.UpdateAttestationConfig(context.Background(), tc.newAttestationCfg)
			if tc.wantErr {
				assert.Error(err)
				return
			}
			assert.NoError(err)
		})
	}
}

type fakeUnstructuredClient struct {
	mock.Mock
}

func (u *fakeDynamicClient) GetCurrent(ctx context.Context, str string) (*unstructured.Unstructured, error) {
func (u *fakeUnstructuredClient) GetCR(ctx context.Context, _ schema.GroupVersionResource, str string) (*unstructured.Unstructured, error) {
	args := u.Called(ctx, str)
	return args.Get(0).(*unstructured.Unstructured), args.Error(1)
}

func (u *fakeDynamicClient) Update(ctx context.Context, updatedObject *unstructured.Unstructured) (*unstructured.Unstructured, error) {
func (u *fakeUnstructuredClient) UpdateCR(ctx context.Context, _ schema.GroupVersionResource, updatedObject *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	args := u.Called(ctx, updatedObject)
	if args.Get(0) == nil {
		return nil, args.Error(1)
@ -482,60 +536,72 @@ func (u *fakeDynamicClient) Update(ctx context.Context, updatedObject *unstructu
	return updatedObject, args.Error(1)
}

type stubDynamicClient struct {
type stubUnstructuredClient struct {
	object *unstructured.Unstructured
	updatedObject *unstructured.Unstructured
	badUpdatedObject *unstructured.Unstructured
	getErr error
	updateErr error
	getCRErr error
	updateCRErr error
}

func (u *stubDynamicClient) GetCurrent(_ context.Context, _ string) (*unstructured.Unstructured, error) {
	return u.object, u.getErr
func (u *stubUnstructuredClient) GetCR(_ context.Context, _ schema.GroupVersionResource, _ string) (*unstructured.Unstructured, error) {
	return u.object, u.getCRErr
}

func (u *stubDynamicClient) Update(_ context.Context, updatedObject *unstructured.Unstructured) (*unstructured.Unstructured, error) {
func (u *stubUnstructuredClient) UpdateCR(_ context.Context, _ schema.GroupVersionResource, updatedObject *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	u.updatedObject = updatedObject
	if u.badUpdatedObject != nil {
		return u.badUpdatedObject, u.updateErr
		return u.badUpdatedObject, u.updateCRErr
	}
	return u.updatedObject, u.updateErr
	return u.updatedObject, u.updateCRErr
}

type fakeStableClient struct {
type unstructuredInterface interface {
	GetCR(ctx context.Context, gvr schema.GroupVersionResource, name string) (*unstructured.Unstructured, error)
	UpdateCR(ctx context.Context, gvr schema.GroupVersionResource, obj *unstructured.Unstructured) (*unstructured.Unstructured, error)
}

type stubKubectl struct {
	unstructuredInterface
	configMaps map[string]*corev1.ConfigMap
	updatedConfigMaps map[string]*corev1.ConfigMap
	k8sVersion string
	getErr error
	updateErr error
	createErr error
	getCMErr error
	updateCMErr error
	createCMErr error
	k8sErr error
	nodes []corev1.Node
	nodesErr error
}

func (s *fakeStableClient) GetConfigMap(_ context.Context, name string) (*corev1.ConfigMap, error) {
	return s.configMaps[name], s.getErr
func (s *stubKubectl) GetConfigMap(_ context.Context, _, name string) (*corev1.ConfigMap, error) {
	return s.configMaps[name], s.getCMErr
}

func (s *fakeStableClient) UpdateConfigMap(_ context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
func (s *stubKubectl) UpdateConfigMap(_ context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
	if s.updatedConfigMaps == nil {
		s.updatedConfigMaps = map[string]*corev1.ConfigMap{}
	}
	s.updatedConfigMaps[configMap.ObjectMeta.Name] = configMap
	return s.updatedConfigMaps[configMap.ObjectMeta.Name], s.updateErr
	return s.updatedConfigMaps[configMap.ObjectMeta.Name], s.updateCMErr
}

func (s *fakeStableClient) CreateConfigMap(_ context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
func (s *stubKubectl) CreateConfigMap(_ context.Context, configMap *corev1.ConfigMap) error {
	if s.configMaps == nil {
		s.configMaps = map[string]*corev1.ConfigMap{}
	}
	s.configMaps[configMap.ObjectMeta.Name] = configMap
	return s.configMaps[configMap.ObjectMeta.Name], s.createErr
	return s.createCMErr
}

func (s *fakeStableClient) KubernetesVersion() (string, error) {
func (s *stubKubectl) KubernetesVersion() (string, error) {
	return s.k8sVersion, s.k8sErr
}

func (s *stubKubectl) GetNodes(_ context.Context) ([]corev1.Node, error) {
	return s.nodes, s.nodesErr
}

type stubImageFetcher struct {
	reference string
	fetchReferenceErr error
94 cli/internal/kubecmd/status.go Normal file
@ -0,0 +1,94 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package kubecmd

import (
	"fmt"

	updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

// NodeVersion bundles version information of a Constellation cluster.
type NodeVersion struct {
	imageVersion      string
	imageReference    string
	kubernetesVersion string
	clusterStatus     string
}

// NewNodeVersion extracts the version information from a NodeVersion object.
func NewNodeVersion(nodeVersion updatev1alpha1.NodeVersion) (NodeVersion, error) {
	if len(nodeVersion.Status.Conditions) != 1 {
		return NodeVersion{}, fmt.Errorf("expected exactly one condition, got %d", len(nodeVersion.Status.Conditions))
	}
	return NodeVersion{
		imageVersion:      nodeVersion.Spec.ImageVersion,
		imageReference:    nodeVersion.Spec.ImageReference,
		kubernetesVersion: nodeVersion.Spec.KubernetesClusterVersion,
		clusterStatus:     nodeVersion.Status.Conditions[0].Message,
	}, nil
}

// ImageVersion is the version of the image running on a node.
func (n NodeVersion) ImageVersion() string {
	return n.imageVersion
}

// ImageReference is a CSP specific path to the image.
func (n NodeVersion) ImageReference() string {
	return n.imageReference
}

// KubernetesVersion is the Kubernetes version running on a node.
func (n NodeVersion) KubernetesVersion() string {
	return n.kubernetesVersion
}

// ClusterStatus is a string describing the status of the cluster.
func (n NodeVersion) ClusterStatus() string {
	return n.clusterStatus
}

// NodeStatus bundles status information about a Kubernetes node.
type NodeStatus struct {
	kubeletVersion string
	imageVersion   string
}

// NewNodeStatus returns a new NodeStatus.
func NewNodeStatus(node corev1.Node) NodeStatus {
	return NodeStatus{
		kubeletVersion: node.Status.NodeInfo.KubeletVersion,
		imageVersion:   node.ObjectMeta.Annotations["constellation.edgeless.systems/node-image"],
	}
}

// KubeletVersion returns the kubelet version of the node.
func (n *NodeStatus) KubeletVersion() string {
	return n.kubeletVersion
}

// ImageVersion returns the node image of the node.
func (n *NodeStatus) ImageVersion() string {
	return n.imageVersion
}

// updateNodeVersions copies the non-empty fields of newNodeVersion into node.
func updateNodeVersions(newNodeVersion updatev1alpha1.NodeVersion, node *updatev1alpha1.NodeVersion) {
	if newNodeVersion.Spec.ImageVersion != "" {
		node.Spec.ImageVersion = newNodeVersion.Spec.ImageVersion
	}
	if newNodeVersion.Spec.ImageReference != "" {
		node.Spec.ImageReference = newNodeVersion.Spec.ImageReference
	}
	if newNodeVersion.Spec.KubernetesComponentsReference != "" {
		node.Spec.KubernetesComponentsReference = newNodeVersion.Spec.KubernetesComponentsReference
	}
	if newNodeVersion.Spec.KubernetesClusterVersion != "" {
		node.Spec.KubernetesClusterVersion = newNodeVersion.Spec.KubernetesClusterVersion
	}
}
@ -1,86 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

/*
Package kubernetes provides functions to interact with a Kubernetes cluster to the CLI.
The package should be used for:

  - Fetching status information about the cluster
  - Creating, deleting, or migrating resources not managed by Helm

The package should not be used for anything that doesn't just require the Kubernetes API.
For example, Terraform and Helm actions should not be accessed by this package.
*/
package kubernetes

import (
	"context"
	"fmt"

	"github.com/edgelesssys/constellation/v2/internal/constants"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func newClient(kubeconfigPath string) (kubernetes.Interface, error) {
	kubeConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
	if err != nil {
		return nil, fmt.Errorf("building kubernetes config: %w", err)
	}

	kubeClient, err := kubernetes.NewForConfig(kubeConfig)
	if err != nil {
		return nil, fmt.Errorf("setting up kubernetes client: %w", err)
	}
	return kubeClient, nil
}

// StableInterface is an interface to interact with stable resources.
type StableInterface interface {
	GetConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error)
	UpdateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error)
	CreateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error)
	KubernetesVersion() (string, error)
}

// NewStableClient returns a new StableClient.
func NewStableClient(kubeconfigPath string) (StableInterface, error) {
	client, err := newClient(kubeconfigPath)
	if err != nil {
		return nil, err
	}
	return &stableClient{client}, nil
}

type stableClient struct {
	client kubernetes.Interface
}

// GetConfigMap returns a ConfigMap given its name.
func (u *stableClient) GetConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error) {
	return u.client.CoreV1().ConfigMaps(constants.ConstellationNamespace).Get(ctx, name, metav1.GetOptions{})
}

// UpdateConfigMap updates the given ConfigMap.
func (u *stableClient) UpdateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
	return u.client.CoreV1().ConfigMaps(constants.ConstellationNamespace).Update(ctx, configMap, metav1.UpdateOptions{})
}

// CreateConfigMap creates the given ConfigMap.
func (u *stableClient) CreateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
	return u.client.CoreV1().ConfigMaps(constants.ConstellationNamespace).Create(ctx, configMap, metav1.CreateOptions{})
}

// KubernetesVersion returns the Kubernetes version of the cluster.
func (u *stableClient) KubernetesVersion() (string, error) {
	serverVersion, err := u.client.Discovery().ServerVersion()
	if err != nil {
		return "", err
	}
	return serverVersion.GitVersion, nil
}
@ -1,92 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package kubernetes

import (
	"context"
	"fmt"

	updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

// TargetVersions bundles version information about the target versions of a cluster.
type TargetVersions struct {
	// image version
	image string
	// CSP specific path to the image
	imageReference string
	// kubernetes version
	kubernetes string
}

// NewTargetVersions returns the target versions for the cluster.
func NewTargetVersions(nodeVersion updatev1alpha1.NodeVersion) (TargetVersions, error) {
	return TargetVersions{
		image:          nodeVersion.Spec.ImageVersion,
		imageReference: nodeVersion.Spec.ImageReference,
		kubernetes:     nodeVersion.Spec.KubernetesClusterVersion,
	}, nil
}

// Image returns the image version.
func (c *TargetVersions) Image() string {
	return c.image
}

// ImagePath returns the image path.
func (c *TargetVersions) ImagePath() string {
	return c.imageReference
}

// Kubernetes returns the Kubernetes version.
func (c *TargetVersions) Kubernetes() string {
	return c.kubernetes
}

// ClusterStatus returns a map from node name to NodeStatus.
func ClusterStatus(ctx context.Context, kubeclient kubeClient) (map[string]NodeStatus, error) {
	nodes, err := kubeclient.GetNodes(ctx)
	if err != nil {
		return nil, fmt.Errorf("getting nodes: %w", err)
	}

	clusterStatus := map[string]NodeStatus{}
	for _, node := range nodes {
		clusterStatus[node.ObjectMeta.Name] = NewNodeStatus(node)
	}

	return clusterStatus, nil
}

// NodeStatus bundles status information about a node.
type NodeStatus struct {
	kubeletVersion string
	imageVersion   string
}

// NewNodeStatus returns a new NodeStatus.
func NewNodeStatus(node corev1.Node) NodeStatus {
	return NodeStatus{
		kubeletVersion: node.Status.NodeInfo.KubeletVersion,
		imageVersion:   node.ObjectMeta.Annotations["constellation.edgeless.systems/node-image"],
	}
}

// KubeletVersion returns the kubelet version of the node.
func (n *NodeStatus) KubeletVersion() string {
	return n.kubeletVersion
}

// ImageVersion returns the node image of the node.
func (n *NodeStatus) ImageVersion() string {
	return n.imageVersion
}

type kubeClient interface {
	GetNodes(ctx context.Context) ([]corev1.Node, error)
}
@ -1,520 +0,0 @@
|
||||
/*
|
||||
Copyright (c) Edgeless Systems GmbH
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
*/
|
||||
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/edgelesssys/constellation/v2/internal/api/versionsapi"
|
||||
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
|
||||
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
|
||||
"github.com/edgelesssys/constellation/v2/internal/compatibility"
|
||||
"github.com/edgelesssys/constellation/v2/internal/config"
|
||||
"github.com/edgelesssys/constellation/v2/internal/constants"
|
||||
"github.com/edgelesssys/constellation/v2/internal/imagefetcher"
|
||||
internalk8s "github.com/edgelesssys/constellation/v2/internal/kubernetes"
|
||||
"github.com/edgelesssys/constellation/v2/internal/versions"
|
||||
"github.com/edgelesssys/constellation/v2/internal/versions/components"
|
||||
updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/util/retry"
|
||||
kubeadmv1beta3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
// ErrInProgress signals that an upgrade is in progress inside the cluster.
|
||||
var ErrInProgress = errors.New("upgrade in progress")
|
||||
|
||||
// GetConstellationVersion queries the constellation-version object for a given field.
|
||||
func GetConstellationVersion(ctx context.Context, client DynamicInterface) (updatev1alpha1.NodeVersion, error) {
|
||||
raw, err := client.GetCurrent(ctx, "constellation-version")
|
||||
if err != nil {
|
||||
return updatev1alpha1.NodeVersion{}, err
|
||||
}
|
||||
var nodeVersion updatev1alpha1.NodeVersion
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(raw.UnstructuredContent(), &nodeVersion); err != nil {
|
||||
return updatev1alpha1.NodeVersion{}, fmt.Errorf("converting unstructured to NodeVersion: %w", err)
|
||||
}
|
||||
|
||||
return nodeVersion, nil
|
||||
}
|
||||
|
||||
// InvalidUpgradeError present an invalid upgrade. It wraps the source and destination version for improved debuggability.
|
||||
type applyError struct {
|
||||
expected string
|
||||
actual string
|
||||
}
|
||||
|
||||
// Error returns the String representation of this error.
|
||||
func (e *applyError) Error() string {
|
||||
return fmt.Sprintf("expected NodeVersion to contain %s, got %s", e.expected, e.actual)
|
||||
}
|
||||
|
||||
// Upgrader handles upgrading the cluster's components using the CLI.
|
||||
type Upgrader struct {
|
||||
stableInterface StableInterface
|
||||
dynamicInterface DynamicInterface
|
||||
imageFetcher imageFetcher
|
||||
outWriter io.Writer
|
||||
log debugLog
|
||||
}
|
||||
|
||||
// NewUpgrader returns a new Upgrader.
|
||||
func NewUpgrader(outWriter io.Writer, kubeConfigPath string, log debugLog) (*Upgrader, error) {
|
||||
kubeClient, err := newClient(kubeConfigPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// use unstructured client to avoid importing the operator packages
|
||||
kubeConfig, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("building kubernetes config: %w", err)
|
||||
}
|
||||
|
||||
unstructuredClient, err := dynamic.NewForConfig(kubeConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting up custom resource client: %w", err)
|
||||
}
|
||||
|
||||
return &Upgrader{
|
||||
imageFetcher: imagefetcher.New(),
|
||||
outWriter: outWriter,
|
||||
log: log,
|
||||
stableInterface: &stableClient{client: kubeClient},
|
||||
dynamicInterface: &NodeVersionClient{client: unstructuredClient},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetMeasurementSalt returns the measurementSalt from the join-config.
|
||||
func (u *Upgrader) GetMeasurementSalt(ctx context.Context) ([]byte, error) {
|
||||
cm, err := u.stableInterface.GetConfigMap(ctx, constants.JoinConfigMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("retrieving current join-config: %w", err)
|
||||
}
|
||||
salt, ok := cm.BinaryData[constants.MeasurementSaltFilename]
|
||||
if !ok {
|
||||
return nil, errors.New("measurementSalt missing from join-config")
|
||||
}
|
||||
return salt, nil
|
||||
}
|
||||
|
||||
// UpgradeNodeVersion upgrades the cluster's NodeVersion object and in turn triggers image & k8s version upgrades.
|
||||
// The versions set in the config are validated against the versions running in the cluster.
|
||||
func (u *Upgrader) UpgradeNodeVersion(ctx context.Context, conf *config.Config, force bool) error {
|
||||
provider := conf.GetProvider()
|
||||
attestationVariant := conf.GetAttestationConfig().GetVariant()
|
||||
region := conf.GetRegion()
|
||||
imageReference, err := u.imageFetcher.FetchReference(ctx, provider, attestationVariant, conf.Image, region)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching image reference: %w", err)
|
||||
}
|
||||
|
||||
imageVersion, err := versionsapi.NewVersionFromShortPath(conf.Image, versionsapi.VersionKindImage)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing version from image short path: %w", err)
|
||||
}
|
||||
|
||||
nodeVersion, err := u.getClusterStatus(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
upgradeErrs := []error{}
|
||||
var upgradeErr *compatibility.InvalidUpgradeError
|
||||
|
||||
err = u.updateImage(&nodeVersion, imageReference, imageVersion.Version(), force)
|
||||
switch {
|
||||
case errors.As(err, &upgradeErr):
|
||||
upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping image upgrades: %w", err))
|
||||
case err != nil:
|
||||
return fmt.Errorf("updating image version: %w", err)
|
||||
}
|
||||
|
||||
// We have to allow users to specify outdated k8s patch versions.
|
||||
// Therefore, this code has to skip k8s updates if a user configures an outdated (i.e. invalid) k8s version.
|
||||
var components *corev1.ConfigMap
|
||||
currentK8sVersion, err := versions.NewValidK8sVersion(conf.KubernetesVersion, true)
|
||||
if err != nil {
|
||||
innerErr := fmt.Errorf("unsupported Kubernetes version, supported versions are %s", strings.Join(versions.SupportedK8sVersions(), ", "))
|
||||
err = compatibility.NewInvalidUpgradeError(nodeVersion.Spec.KubernetesClusterVersion, conf.KubernetesVersion, innerErr)
|
||||
} else {
|
||||
versionConfig := versions.VersionConfigs[currentK8sVersion]
|
||||
components, err = u.updateK8s(&nodeVersion, versionConfig.ClusterVersion, versionConfig.KubernetesComponents, force)
|
||||
}
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
err := u.applyComponentsCM(ctx, components)
|
||||
if err != nil {
|
||||
return fmt.Errorf("applying k8s components ConfigMap: %w", err)
|
||||
}
|
||||
case errors.As(err, &upgradeErr):
|
||||
upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping Kubernetes upgrades: %w", err))
|
||||
default:
|
||||
return fmt.Errorf("updating Kubernetes version: %w", err)
|
||||
}
|
||||
|
||||
if len(upgradeErrs) == 2 {
|
||||
return errors.Join(upgradeErrs...)
|
||||
}
|
||||
|
||||
updatedNodeVersion, err := u.applyNodeVersion(ctx, nodeVersion)
|
||||
if err != nil {
|
||||
return fmt.Errorf("applying upgrade: %w", err)
|
||||
}
|
||||
switch {
|
||||
case updatedNodeVersion.Spec.ImageReference != nodeVersion.Spec.ImageReference:
|
||||
return &applyError{expected: nodeVersion.Spec.ImageReference, actual: updatedNodeVersion.Spec.ImageReference}
|
||||
case updatedNodeVersion.Spec.ImageVersion != nodeVersion.Spec.ImageVersion:
|
||||
return &applyError{expected: nodeVersion.Spec.ImageVersion, actual: updatedNodeVersion.Spec.ImageVersion}
|
||||
case updatedNodeVersion.Spec.KubernetesComponentsReference != nodeVersion.Spec.KubernetesComponentsReference:
|
||||
return &applyError{expected: nodeVersion.Spec.KubernetesComponentsReference, actual: updatedNodeVersion.Spec.KubernetesComponentsReference}
|
||||
case updatedNodeVersion.Spec.KubernetesClusterVersion != nodeVersion.Spec.KubernetesClusterVersion:
|
||||
return &applyError{expected: nodeVersion.Spec.KubernetesClusterVersion, actual: updatedNodeVersion.Spec.KubernetesClusterVersion}
|
||||
}
|
||||
|
||||
return errors.Join(upgradeErrs...)
|
||||
}
|
||||
|
||||
// KubernetesVersion returns the version of Kubernetes the Constellation is currently running on.
|
||||
func (u *Upgrader) KubernetesVersion() (string, error) {
|
||||
return u.stableInterface.KubernetesVersion()
|
||||
}
|
||||
|
||||
// CurrentImage returns the currently used image version of the cluster.
|
||||
func (u *Upgrader) CurrentImage(ctx context.Context) (string, error) {
|
||||
nodeVersion, err := GetConstellationVersion(ctx, u.dynamicInterface)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("getting constellation-version: %w", err)
|
||||
}
|
||||
return nodeVersion.Spec.ImageVersion, nil
|
||||
}
|
||||
|
||||
// CurrentKubernetesVersion returns the currently used Kubernetes version.
|
||||
func (u *Upgrader) CurrentKubernetesVersion(ctx context.Context) (string, error) {
|
||||
nodeVersion, err := GetConstellationVersion(ctx, u.dynamicInterface)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("getting constellation-version: %w", err)
|
||||
}
|
||||
return nodeVersion.Spec.KubernetesClusterVersion, nil
|
||||
}
|
||||
|
||||
// GetClusterAttestationConfig fetches the join-config configmap from the cluster, extracts the config
|
||||
// and returns both the full configmap and the attestation config.
|
||||
func (u *Upgrader) GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error) {
|
||||
existingConf, err := u.stableInterface.GetConfigMap(ctx, constants.JoinConfigMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("retrieving current attestation config: %w", err)
|
||||
}
|
||||
if _, ok := existingConf.Data[constants.AttestationConfigFilename]; !ok {
|
||||
return nil, errors.New("attestation config missing from join-config")
|
||||
}
|
||||
|
||||
existingAttestationConfig, err := config.UnmarshalAttestationConfig([]byte(existingConf.Data[constants.AttestationConfigFilename]), variant)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("retrieving current attestation config: %w", err)
|
||||
}
|
||||
|
||||
return existingAttestationConfig, nil
|
||||
}
|
||||
|
||||
// BackupConfigMap creates a backup of the given config map.
|
||||
func (u *Upgrader) BackupConfigMap(ctx context.Context, name string) error {
|
||||
cm, err := u.stableInterface.GetConfigMap(ctx, name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting config map %s: %w", name, err)
|
||||
}
|
||||
backup := cm.DeepCopy()
|
||||
backup.ObjectMeta = metav1.ObjectMeta{}
|
||||
backup.Name = fmt.Sprintf("%s-backup", name)
|
||||
if _, err := u.stableInterface.CreateConfigMap(ctx, backup); err != nil {
|
||||
if _, err := u.stableInterface.UpdateConfigMap(ctx, backup); err != nil {
|
||||
return fmt.Errorf("updating backup config map: %w", err)
|
||||
}
|
||||
}
|
||||
u.log.Debugf("Successfully backed up config map %s", cm.Name)
|
||||
return nil
|
||||
}

// UpdateAttestationConfig replaces the attestation config in the cluster's
// join-config ConfigMap with the given config.
func (u *Upgrader) UpdateAttestationConfig(ctx context.Context, newAttestConfig config.AttestationCfg) error {
    // fetch the current join-config so it can be updated in place
    joinConfig, err := u.stableInterface.GetConfigMap(ctx, constants.JoinConfigMap)
    if err != nil {
        return fmt.Errorf("getting join-config configmap: %w", err)
    }

    newConfigJSON, err := json.Marshal(newAttestConfig)
    if err != nil {
        return fmt.Errorf("marshaling attestation config: %w", err)
    }
    joinConfig.Data[constants.AttestationConfigFilename] = string(newConfigJSON)
    u.log.Debugf("Triggering attestation config update now")
    if _, err = u.stableInterface.UpdateConfigMap(ctx, joinConfig); err != nil {
        return fmt.Errorf("setting new attestation config: %w", err)
    }

    fmt.Fprintln(u.outWriter, "Successfully updated the cluster's attestation config")
    return nil
}
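
For callers that want a recovery path, the two methods compose naturally: back up the join-config first, then write the new config. A sketch in the package's own terms, assuming an initialized Upgrader u; the wrapper updateAttestationSafely is hypothetical, not part of the package:

// updateAttestationSafely keeps a join-config-backup copy around so a bad
// config can be restored by hand before nodes start failing attestation.
func updateAttestationSafely(ctx context.Context, u *Upgrader, newCfg config.AttestationCfg) error {
    if err := u.BackupConfigMap(ctx, constants.JoinConfigMap); err != nil {
        return fmt.Errorf("backing up join-config: %w", err)
    }
    return u.UpdateAttestationConfig(ctx, newCfg)
}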

// ExtendClusterConfigCertSANs extends the ClusterConfig stored under "kube-system/kubeadm-config" with the given SANs.
// Existing SANs are preserved.
func (u *Upgrader) ExtendClusterConfigCertSANs(ctx context.Context, alternativeNames []string) error {
    clusterConfiguration, kubeadmConfig, err := u.GetClusterConfiguration(ctx)
    if err != nil {
        return fmt.Errorf("getting ClusterConfig: %w", err)
    }

    existingSANs := make(map[string]struct{})
    for _, existingSAN := range clusterConfiguration.APIServer.CertSANs {
        existingSANs[existingSAN] = struct{}{}
    }

    var missingSANs []string
    for _, san := range alternativeNames {
        if _, ok := existingSANs[san]; !ok {
            missingSANs = append(missingSANs, san)
        }
    }

    if len(missingSANs) == 0 {
        return nil
    }
    u.log.Debugf("Extending the cluster's apiserver SAN field with the following SANs: %s\n", strings.Join(missingSANs, ", "))

    clusterConfiguration.APIServer.CertSANs = append(clusterConfiguration.APIServer.CertSANs, missingSANs...)
    sort.Strings(clusterConfiguration.APIServer.CertSANs)

    newConfigYAML, err := yaml.Marshal(clusterConfiguration)
    if err != nil {
        return fmt.Errorf("marshaling ClusterConfiguration: %w", err)
    }

    kubeadmConfig.Data[constants.ClusterConfigurationKey] = string(newConfigYAML)
    u.log.Debugf("Triggering kubeadm config update now")
    if _, err = u.stableInterface.UpdateConfigMap(ctx, kubeadmConfig); err != nil {
        return fmt.Errorf("setting new kubeadm config: %w", err)
    }

    fmt.Fprintln(u.outWriter, "Successfully extended the cluster's apiserver SAN field")
    return nil
}
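
The dedup-and-sort step is self-contained enough to show in isolation. A runnable sketch; mergeSANs is an illustrative helper, not part of the package:

package main

import (
    "fmt"
    "sort"
)

// mergeSANs returns existing plus any requested names not already present, sorted.
func mergeSANs(existing, requested []string) []string {
    seen := make(map[string]struct{}, len(existing))
    merged := append([]string{}, existing...)
    for _, san := range existing {
        seen[san] = struct{}{}
    }
    for _, san := range requested {
        if _, ok := seen[san]; !ok {
            merged = append(merged, san)
            seen[san] = struct{}{}
        }
    }
    sort.Strings(merged)
    return merged
}

func main() {
    // "kubernetes" is already present and is not duplicated.
    fmt.Println(mergeSANs([]string{"10.0.0.1", "kubernetes"}, []string{"lb.example.com", "kubernetes"}))
    // Output: [10.0.0.1 kubernetes lb.example.com]
}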

// GetClusterConfiguration fetches the kubeadm-config configmap from the cluster, extracts the config
// and returns both the full configmap and the ClusterConfiguration.
func (u *Upgrader) GetClusterConfiguration(ctx context.Context) (kubeadmv1beta3.ClusterConfiguration, *corev1.ConfigMap, error) {
    existingConf, err := u.stableInterface.GetConfigMap(ctx, constants.KubeadmConfigMap)
    if err != nil {
        return kubeadmv1beta3.ClusterConfiguration{}, nil, fmt.Errorf("retrieving current kubeadm-config: %w", err)
    }
    clusterConf, ok := existingConf.Data[constants.ClusterConfigurationKey]
    if !ok {
        return kubeadmv1beta3.ClusterConfiguration{}, nil, errors.New("ClusterConfiguration missing from kubeadm-config")
    }

    var existingClusterConfig kubeadmv1beta3.ClusterConfiguration
    if err := yaml.Unmarshal([]byte(clusterConf), &existingClusterConfig); err != nil {
        return kubeadmv1beta3.ClusterConfiguration{}, nil, fmt.Errorf("unmarshaling ClusterConfiguration: %w", err)
    }

    return existingClusterConfig, existingConf, nil
}

// applyComponentsCM applies the k8s components ConfigMap to the cluster.
func (u *Upgrader) applyComponentsCM(ctx context.Context, components *corev1.ConfigMap) error {
    _, err := u.stableInterface.CreateConfigMap(ctx, components)
    // If the map already exists, we can use the existing map and assume it has the same content as 'components'.
    if err != nil && !k8serrors.IsAlreadyExists(err) {
        return fmt.Errorf("creating k8s-components ConfigMap: %w. %T", err, err)
    }
    return nil
}

func (u *Upgrader) applyNodeVersion(ctx context.Context, nodeVersion updatev1alpha1.NodeVersion) (updatev1alpha1.NodeVersion, error) {
    u.log.Debugf("Triggering NodeVersion upgrade now")
    var updatedNodeVersion updatev1alpha1.NodeVersion
    err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
        newNode, err := u.getClusterStatus(ctx)
        if err != nil {
            return fmt.Errorf("retrieving current NodeVersion: %w", err)
        }
        cmd := newUpgradeVersionCmd(nodeVersion)
        cmd.SetUpdatedVersions(&newNode)
        raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&newNode)
        if err != nil {
            return fmt.Errorf("converting nodeVersion to unstructured: %w", err)
        }
        updated, err := u.dynamicInterface.Update(ctx, &unstructured.Unstructured{Object: raw})
        if err != nil {
            return err
        }

        if err := runtime.DefaultUnstructuredConverter.FromUnstructured(updated.UnstructuredContent(), &updatedNodeVersion); err != nil {
            return fmt.Errorf("converting unstructured to NodeVersion: %w", err)
        }
        return nil
    })

    return updatedNodeVersion, err
}
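
The RetryOnConflict wrapper is what makes this update safe against the node operator writing the same object concurrently: on a 409 Conflict the closure re-fetches the NodeVersion and reapplies the changes to the fresh copy. The shape of the pattern, reduced to its essentials in the package's own terms (genericUpdate and its parameters are illustrative):

// genericUpdate retries the read-modify-write cycle until it goes through
// without a conflict, so no concurrent writer is silently overwritten.
func genericUpdate(
    ctx context.Context,
    client DynamicInterface,
    name string,
    mutate func(*unstructured.Unstructured),
) (*unstructured.Unstructured, error) {
    var updated *unstructured.Unstructured
    err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
        obj, err := client.GetCurrent(ctx, name) // fresh read on every attempt
        if err != nil {
            return fmt.Errorf("fetching current object: %w", err)
        }
        mutate(obj) // reapply the intended change to the fresh copy
        updated, err = client.Update(ctx, obj)
        return err
    })
    return updated, err
}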

func (u *Upgrader) getClusterStatus(ctx context.Context) (updatev1alpha1.NodeVersion, error) {
    nodeVersion, err := GetConstellationVersion(ctx, u.dynamicInterface)
    if err != nil {
        return updatev1alpha1.NodeVersion{}, fmt.Errorf("retrieving current image: %w", err)
    }

    return nodeVersion, nil
}

// updateImage updates the local NodeVersion copy with the given image reference
// and version, after validating that the upgrade is allowed.
func (u *Upgrader) updateImage(nodeVersion *updatev1alpha1.NodeVersion, newImageReference, newImageVersion string, force bool) error {
    currentImageVersion := nodeVersion.Spec.ImageVersion
    if !force {
        if upgradeInProgress(*nodeVersion) {
            return ErrInProgress
        }
        if err := compatibility.IsValidUpgrade(currentImageVersion, newImageVersion); err != nil {
            return fmt.Errorf("validating image update: %w", err)
        }
    }
    u.log.Debugf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, newImageVersion)
    nodeVersion.Spec.ImageReference = newImageReference
    nodeVersion.Spec.ImageVersion = newImageVersion

    return nil
}

func (u *Upgrader) updateK8s(nodeVersion *updatev1alpha1.NodeVersion, newClusterVersion string, components components.Components, force bool) (*corev1.ConfigMap, error) {
    configMap, err := internalk8s.ConstructK8sComponentsCM(components, newClusterVersion)
    if err != nil {
        return nil, fmt.Errorf("constructing k8s-components ConfigMap: %w", err)
    }
    if !force {
        if err := compatibility.IsValidUpgrade(nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion); err != nil {
            return nil, err
        }
    }

    u.log.Debugf("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)
    nodeVersion.Spec.KubernetesComponentsReference = configMap.ObjectMeta.Name
    nodeVersion.Spec.KubernetesClusterVersion = newClusterVersion

    return &configMap, nil
}

// NodeVersionClient implements the DynamicInterface interface to interact with NodeVersion objects.
type NodeVersionClient struct {
    client dynamic.Interface
}

// NewNodeVersionClient returns a new NodeVersionClient.
func NewNodeVersionClient(client dynamic.Interface) *NodeVersionClient {
    return &NodeVersionClient{client: client}
}

// GetCurrent returns the current NodeVersion object.
func (u *NodeVersionClient) GetCurrent(ctx context.Context, name string) (*unstructured.Unstructured, error) {
    return u.client.Resource(schema.GroupVersionResource{
        Group:    "update.edgeless.systems",
        Version:  "v1alpha1",
        Resource: "nodeversions",
    }).Get(ctx, name, metav1.GetOptions{})
}

// Update updates the NodeVersion object.
func (u *NodeVersionClient) Update(ctx context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
    return u.client.Resource(schema.GroupVersionResource{
        Group:    "update.edgeless.systems",
        Version:  "v1alpha1",
        Resource: "nodeversions",
    }).Update(ctx, obj, metav1.UpdateOptions{})
}

// DynamicInterface is a general interface to query custom resources.
type DynamicInterface interface {
    GetCurrent(ctx context.Context, name string) (*unstructured.Unstructured, error)
    Update(ctx context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error)
}
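
Because the dynamic client is hidden behind this two-method interface, tests can swap in an in-memory double. A sketch of such a stub; it is illustrative, not the package's actual test code:

// stubDynamicInterface serves and stores a single object in memory.
type stubDynamicInterface struct {
    object *unstructured.Unstructured
}

func (s *stubDynamicInterface) GetCurrent(_ context.Context, _ string) (*unstructured.Unstructured, error) {
    return s.object, nil
}

func (s *stubDynamicInterface) Update(_ context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
    s.object = obj // record the write so the test can inspect it
    return obj, nil
}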

// upgradeInProgress checks if an upgrade is in progress.
// When in doubt it returns true, the "safer" response: callers then at least won't start another update.
func upgradeInProgress(nodeVersion updatev1alpha1.NodeVersion) bool {
    conditions := nodeVersion.Status.Conditions
    activeUpgrade := nodeVersion.Status.ActiveClusterVersionUpgrade

    if activeUpgrade {
        return true
    }

    for _, condition := range conditions {
        if condition.Type == updatev1alpha1.ConditionOutdated && condition.Status == metav1.ConditionTrue {
            return true
        }
    }
    return false
}
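
Either signal alone is enough: an Outdated condition with status True marks the cluster as mid-upgrade even while ActiveClusterVersionUpgrade is false. A sketch; the status field layout is inferred from the usage above:

nv := updatev1alpha1.NodeVersion{}
nv.Status.Conditions = []metav1.Condition{{
    Type:   updatev1alpha1.ConditionOutdated,
    Status: metav1.ConditionTrue,
}}
_ = upgradeInProgress(nv) // true: the Outdated condition short-circuits the check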

type debugLog interface {
    Debugf(format string, args ...any)
    Sync()
}

// imageFetcher gets an image reference from the versionsapi.
type imageFetcher interface {
    FetchReference(ctx context.Context,
        provider cloudprovider.Provider, attestationVariant variant.Variant,
        image, region string,
    ) (string, error)
}

type upgradeVersionCmd struct {
    imageVersion     string
    imageRef         string
    k8sComponentsRef string
    k8sVersion       string
}

func newUpgradeVersionCmd(newNodeVersion updatev1alpha1.NodeVersion) upgradeVersionCmd {
    return upgradeVersionCmd{
        imageVersion:     newNodeVersion.Spec.ImageVersion,
        imageRef:         newNodeVersion.Spec.ImageReference,
        k8sComponentsRef: newNodeVersion.Spec.KubernetesComponentsReference,
        k8sVersion:       newNodeVersion.Spec.KubernetesClusterVersion,
    }
}

func (u upgradeVersionCmd) SetUpdatedVersions(node *updatev1alpha1.NodeVersion) {
    if u.imageVersion != "" {
        node.Spec.ImageVersion = u.imageVersion
    }
    if u.imageRef != "" {
        node.Spec.ImageReference = u.imageRef
    }
    if u.k8sComponentsRef != "" {
        node.Spec.KubernetesComponentsReference = u.k8sComponentsRef
    }
    if u.k8sVersion != "" {
        node.Spec.KubernetesClusterVersion = u.k8sVersion
    }
}
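
The zero-value checks mean a partial upgrade leaves the other fields of the fetched object untouched. For instance, an image-only upgrade might look like the sketch below; the values are hypothetical and the spec type name NodeVersionSpec is assumed from the operator API:

cmd := newUpgradeVersionCmd(updatev1alpha1.NodeVersion{
    Spec: updatev1alpha1.NodeVersionSpec{
        ImageReference: "some/image/ref", // hypothetical values
        ImageVersion:   "v2.10.0",
    },
})
cmd.SetUpdatedVersions(&current) // Kubernetes fields of current stay as-is: both are empty strings here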

@ -19,6 +19,7 @@ go_library(
        "@io_k8s_apimachinery//pkg/types",
        "@io_k8s_client_go//dynamic",
        "@io_k8s_client_go//kubernetes",
        "@io_k8s_client_go//rest",
        "@io_k8s_client_go//scale/scheme",
        "@io_k8s_client_go//tools/clientcmd",
        "@io_k8s_client_go//util/retry",

@ -4,8 +4,10 @@ Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/

// Package kubectl provides a kubectl-like interface for Kubernetes.
// Functions defined here should not make use of [os/exec].
/*
Package kubectl provides a kubectl-like interface for Kubernetes.
Functions defined here should not make use of [os/exec].
*/
package kubectl

import (
@ -26,6 +28,7 @@ import (
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/scale/scheme"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/util/retry"
@ -38,35 +41,34 @@ type Kubectl struct {
    apiextensionClient apiextensionsclientv1.ApiextensionsV1Interface
}

// New returns an empty Kubectl client. Need to call Initialize before usable.
func New() *Kubectl {
// NewUninitialized returns an empty Kubectl client.
// Initialize needs to be called before the client is usable.
func NewUninitialized() *Kubectl {
    return &Kubectl{}
}

// NewFromConfig returns a Kubectl client using the given kubeconfig.
func NewFromConfig(kubeconfigPath string) (*Kubectl, error) {
    clientConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
    if err != nil {
        return nil, fmt.Errorf("creating k8s client from kubeconfig: %w", err)
    }
    k := &Kubectl{}
    if err := k.initialize(clientConfig); err != nil {
        return nil, fmt.Errorf("initializing kubectl: %w", err)
    }
    return k, nil
}
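
With this constructor, callers get a ready-to-use client in one step instead of the two-phase NewUninitialized plus Initialize. A minimal sketch; the import path and kubeconfig location are assumptions, and as an internal package it is only importable from within the Constellation module:

package main

import (
    "fmt"
    "log"

    "github.com/edgelesssys/constellation/v2/internal/kubectl" // assumed import path
)

func main() {
    // Assumed kubeconfig location; any valid kubeconfig path works.
    k, err := kubectl.NewFromConfig("/etc/kubernetes/admin.conf")
    if err != nil {
        log.Fatalf("creating kubectl client: %v", err)
    }
    version, err := k.KubernetesVersion()
    if err != nil {
        log.Fatalf("querying server version: %v", err)
    }
    fmt.Println("cluster version:", version)
}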

// Initialize sets all required fields so the Kubectl client can be used.
func (k *Kubectl) Initialize(kubeconfig []byte) error {
    clientConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig)
    if err != nil {
        return fmt.Errorf("creating k8s client config from kubeconfig: %w", err)
    }
    clientset, err := kubernetes.NewForConfig(clientConfig)
    if err != nil {
        return fmt.Errorf("creating k8s client from kubeconfig: %w", err)
    if err := k.initialize(clientConfig); err != nil {
        return fmt.Errorf("initializing kubectl: %w", err)
    }
    k.Interface = clientset

    dynamicClient, err := dynamic.NewForConfig(clientConfig)
    if err != nil {
        return fmt.Errorf("creating unstructed client: %w", err)
    }
    k.dynamicClient = dynamicClient

    apiextensionClient, err := apiextensionsclientv1.NewForConfig(clientConfig)
    if err != nil {
        return fmt.Errorf("creating api extension client from kubeconfig: %w", err)
    }
    k.apiextensionClient = apiextensionClient

    return nil
}

@ -86,8 +88,8 @@ func (k *Kubectl) ApplyCRD(ctx context.Context, rawCRD []byte) error {
    return err
}

// GetCRDs retrieves all custom resource definitions currently installed in the cluster.
func (k *Kubectl) GetCRDs(ctx context.Context) ([]apiextensionsv1.CustomResourceDefinition, error) {
// ListCRDs retrieves all custom resource definitions currently installed in the cluster.
func (k *Kubectl) ListCRDs(ctx context.Context) ([]apiextensionsv1.CustomResourceDefinition, error) {
    crds, err := k.apiextensionClient.CustomResourceDefinitions().List(ctx, metav1.ListOptions{})
    if err != nil {
        return nil, fmt.Errorf("listing CRDs: %w", err)
@ -96,10 +98,9 @@ func (k *Kubectl) GetCRDs(ctx context.Context) ([]apiextensionsv1.CustomResource
    return crds.Items, nil
}

// GetCRs retrieves all objects for a given CRD.
func (k *Kubectl) GetCRs(ctx context.Context, gvr schema.GroupVersionResource) ([]unstructured.Unstructured, error) {
    crdClient := k.dynamicClient.Resource(gvr)
    unstructuredList, err := crdClient.List(ctx, metav1.ListOptions{})
// ListCRs retrieves all objects for a given CRD.
func (k *Kubectl) ListCRs(ctx context.Context, gvr schema.GroupVersionResource) ([]unstructured.Unstructured, error) {
    unstructuredList, err := k.dynamicClient.Resource(gvr).List(ctx, metav1.ListOptions{})
    if err != nil {
        return nil, fmt.Errorf("listing CRs for GroupVersionResource %+v: %w", gvr, err)
    }
@ -107,15 +108,35 @@ func (k *Kubectl) GetCRs(ctx context.Context, gvr schema.GroupVersionResource) (
    return unstructuredList.Items, nil
}

// GetCR retrieves a Custom Resource given its name and group version resource.
func (k *Kubectl) GetCR(ctx context.Context, gvr schema.GroupVersionResource, name string) (*unstructured.Unstructured, error) {
    return k.dynamicClient.Resource(gvr).Get(ctx, name, metav1.GetOptions{})
}

// UpdateCR updates a Custom Resource given its group version resource.
func (k *Kubectl) UpdateCR(ctx context.Context, gvr schema.GroupVersionResource, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
    return k.dynamicClient.Resource(gvr).Update(ctx, obj, metav1.UpdateOptions{})
}

// CreateConfigMap creates the provided configmap.
func (k *Kubectl) CreateConfigMap(ctx context.Context, configMap corev1.ConfigMap) error {
    _, err := k.CoreV1().ConfigMaps(configMap.ObjectMeta.Namespace).Create(ctx, &configMap, metav1.CreateOptions{})
func (k *Kubectl) CreateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) error {
    _, err := k.CoreV1().ConfigMaps(configMap.ObjectMeta.Namespace).Create(ctx, configMap, metav1.CreateOptions{})
    if err != nil {
        return err
    }
    return nil
}

// GetConfigMap returns a ConfigMap given its name and namespace.
func (k *Kubectl) GetConfigMap(ctx context.Context, namespace, name string) (*corev1.ConfigMap, error) {
    return k.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
}

// UpdateConfigMap updates the given ConfigMap.
func (k *Kubectl) UpdateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
    return k.CoreV1().ConfigMaps(configMap.ObjectMeta.Namespace).Update(ctx, configMap, metav1.UpdateOptions{})
}

// AnnotateNode adds the provided annotations to the node, identified by name.
func (k *Kubectl) AnnotateNode(ctx context.Context, nodeName, annotationKey, annotationValue string) error {
    return retry.RetryOnConflict(retry.DefaultRetry, func() error {
@ -132,20 +153,13 @@ func (k *Kubectl) AnnotateNode(ctx context.Context, nodeName, annotationKey, ann
    })
}

// PatchFirstNodePodCIDR patches the firstNodePodCIDR of the first control-plane node for Cilium.
func (k *Kubectl) PatchFirstNodePodCIDR(ctx context.Context, firstNodePodCIDR string) error {
    selector := labels.Set{"node-role.kubernetes.io/control-plane": ""}.AsSelector()
    controlPlaneList, err := k.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
// KubernetesVersion returns the Kubernetes version of the cluster.
func (k *Kubectl) KubernetesVersion() (string, error) {
    serverVersion, err := k.Discovery().ServerVersion()
    if err != nil {
        return err
        return "", err
    }
    if len(controlPlaneList.Items) != 1 {
        return fmt.Errorf("expected 1 control-plane node, got %d", len(controlPlaneList.Items))
    }
    nodeName := controlPlaneList.Items[0].Name
    // Update the node's spec
    _, err = k.CoreV1().Nodes().Patch(context.Background(), nodeName, types.MergePatchType, []byte(fmt.Sprintf(`{"spec":{"podCIDR":"%s"}}`, firstNodePodCIDR)), metav1.PatchOptions{})
    return err
    return serverVersion.GitVersion, nil
}

// ListAllNamespaces returns all namespaces in the cluster.
@ -162,6 +176,22 @@ func (k *Kubectl) GetNodes(ctx context.Context) ([]corev1.Node, error) {
    return nodes.Items, nil
}

// PatchFirstNodePodCIDR patches the firstNodePodCIDR of the first control-plane node for Cilium.
func (k *Kubectl) PatchFirstNodePodCIDR(ctx context.Context, firstNodePodCIDR string) error {
    selector := labels.Set{"node-role.kubernetes.io/control-plane": ""}.AsSelector()
    controlPlaneList, err := k.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
    if err != nil {
        return err
    }
    if len(controlPlaneList.Items) != 1 {
        return fmt.Errorf("expected 1 control-plane node, got %d", len(controlPlaneList.Items))
    }
    nodeName := controlPlaneList.Items[0].Name
    // Update the node's spec
    _, err = k.CoreV1().Nodes().Patch(context.Background(), nodeName, types.MergePatchType, []byte(fmt.Sprintf(`{"spec":{"podCIDR":"%s"}}`, firstNodePodCIDR)), metav1.PatchOptions{})
    return err
}

// EnforceCoreDNSSpread adds a pod anti-affinity to the CoreDNS deployment to ensure that
// CoreDNS pods are spread across nodes.
func (k *Kubectl) EnforceCoreDNSSpread(ctx context.Context) error {
@ -250,6 +280,28 @@ func (k *Kubectl) AddNodeSelectorsToDeployment(ctx context.Context, selectors ma
    return nil
}

func (k *Kubectl) initialize(clientConfig *rest.Config) error {
    clientset, err := kubernetes.NewForConfig(clientConfig)
    if err != nil {
        return fmt.Errorf("creating k8s client from kubeconfig: %w", err)
    }
    k.Interface = clientset

    dynamicClient, err := dynamic.NewForConfig(clientConfig)
    if err != nil {
        return fmt.Errorf("creating unstructured client: %w", err)
    }
    k.dynamicClient = dynamicClient

    apiextensionClient, err := apiextensionsclientv1.NewForConfig(clientConfig)
    if err != nil {
        return fmt.Errorf("creating api extension client from kubeconfig: %w", err)
    }
    k.apiextensionClient = apiextensionClient

    return nil
}

// parseCRD takes a byte slice of data and tries to create a CustomResourceDefinition object from it.
func parseCRD(crdString []byte) (*v1.CustomResourceDefinition, error) {
    sch := runtime.NewScheme()