cli: add kubernetes pkg to interface with cluster
Previously, the content of the files status and upgrade within the cloudcmd pkg did not fit cloudcmd's pkg description. This patch introduces a separate pkg to fix that.
parent c8c2953d7b
commit 7c8215e507

13 changed files with 628 additions and 70 deletions
cli/internal/kubernetes/BUILD.bazel (new file, 58 lines)
@@ -0,0 +1,58 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("//bazel/go:go_test.bzl", "go_test")

go_library(
    name = "kubernetes",
    srcs = [
        "kubernetes.go",
        "status.go",
        "upgrade.go",
    ],
    importpath = "github.com/edgelesssys/constellation/v2/cli/internal/kubernetes",
    visibility = ["//cli:__subpackages__"],
    deps = [
        "//cli/internal/helm",
        "//cli/internal/image",
        "//internal/attestation/measurements",
        "//internal/compatibility",
        "//internal/config",
        "//internal/constants",
        "//internal/kubernetes",
        "//internal/kubernetes/kubectl",
        "//internal/versions",
        "//internal/versions/components",
        "//internal/versionsapi",
        "//operators/constellation-node-operator/api/v1alpha1",
        "@io_k8s_api//core/v1:core",
        "@io_k8s_apimachinery//pkg/api/errors",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
        "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured",
        "@io_k8s_apimachinery//pkg/runtime",
        "@io_k8s_apimachinery//pkg/runtime/schema",
        "@io_k8s_client_go//dynamic",
        "@io_k8s_client_go//kubernetes",
        "@io_k8s_client_go//tools/clientcmd",
    ],
)

go_test(
    name = "kubernetes_test",
    srcs = ["upgrade_test.go"],
    embed = [":kubernetes"],
    deps = [
        "//internal/attestation/measurements",
        "//internal/compatibility",
        "//internal/config",
        "//internal/constants",
        "//internal/logger",
        "//internal/versions",
        "//internal/versions/components",
        "//operators/constellation-node-operator/api/v1alpha1",
        "@com_github_stretchr_testify//assert",
        "@com_github_stretchr_testify//require",
        "@io_k8s_api//core/v1:core",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
        "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured",
        "@io_k8s_apimachinery//pkg/runtime",
    ],
)
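
Aside (not part of the diff): the targets above are driven with standard Bazel commands, e.g. "bazel build //cli/internal/kubernetes" and "bazel test //cli/internal/kubernetes:kubernetes_test"; the target labels are derived from this BUILD file. Note that the visibility attribute restricts consumers to packages under //cli.
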
cli/internal/kubernetes/kubernetes.go (new file, 12 lines)
@@ -0,0 +1,12 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

/*
Package kubernetes provides functions for the CLI to interact with a live cluster.

Currently it is used to implement the status and upgrade commands.
*/
package kubernetes
cli/internal/kubernetes/status.go (new file, 92 lines)
@@ -0,0 +1,92 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package kubernetes

import (
	"context"
	"fmt"

	updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

// TargetVersions bundles version information about the target versions of a cluster.
type TargetVersions struct {
	// image version
	image string
	// CSP-specific path to the image
	imageReference string
	// kubernetes version
	kubernetes string
}

// NewTargetVersions returns the target versions for the cluster.
func NewTargetVersions(nodeVersion updatev1alpha1.NodeVersion) (TargetVersions, error) {
	return TargetVersions{
		image:          nodeVersion.Spec.ImageVersion,
		imageReference: nodeVersion.Spec.ImageReference,
		kubernetes:     nodeVersion.Spec.KubernetesClusterVersion,
	}, nil
}

// Image returns the image version.
func (c *TargetVersions) Image() string {
	return c.image
}

// ImagePath returns the image path.
func (c *TargetVersions) ImagePath() string {
	return c.imageReference
}

// Kubernetes returns the Kubernetes version.
func (c *TargetVersions) Kubernetes() string {
	return c.kubernetes
}

// ClusterStatus returns a map from node name to NodeStatus.
func ClusterStatus(ctx context.Context, kubeclient kubeClient) (map[string]NodeStatus, error) {
	nodes, err := kubeclient.GetNodes(ctx)
	if err != nil {
		return nil, fmt.Errorf("getting nodes: %w", err)
	}

	clusterStatus := map[string]NodeStatus{}
	for _, node := range nodes {
		clusterStatus[node.ObjectMeta.Name] = NewNodeStatus(node)
	}

	return clusterStatus, nil
}

// NodeStatus bundles status information about a node.
type NodeStatus struct {
	kubeletVersion string
	imageVersion   string
}

// NewNodeStatus returns a new NodeStatus.
func NewNodeStatus(node corev1.Node) NodeStatus {
	return NodeStatus{
		kubeletVersion: node.Status.NodeInfo.KubeletVersion,
		imageVersion:   node.ObjectMeta.Annotations["constellation.edgeless.systems/node-image"],
	}
}

// KubeletVersion returns the kubelet version of the node.
func (n *NodeStatus) KubeletVersion() string {
	return n.kubeletVersion
}

// ImageVersion returns the node image of the node.
func (n *NodeStatus) ImageVersion() string {
	return n.imageVersion
}

type kubeClient interface {
	GetNodes(ctx context.Context) ([]corev1.Node, error)
}
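
Aside (illustrative, not part of the diff): a minimal sketch of how a caller could drive status.go. Any type with a matching GetNodes method satisfies the unexported kubeClient interface; the node name, annotation value, and kubelet version below are made-up sample data. In practice only code under cli/ can import this package (it is an internal package, and the BUILD visibility restricts it the same way).

package main

import (
	"context"
	"fmt"

	cliK8s "github.com/edgelesssys/constellation/v2/cli/internal/kubernetes"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// staticNodeClient returns a canned node list; it satisfies the package's
// unexported kubeClient interface because it has the right GetNodes method.
type staticNodeClient struct{ nodes []corev1.Node }

func (c staticNodeClient) GetNodes(_ context.Context) ([]corev1.Node, error) {
	return c.nodes, nil
}

func main() {
	node := corev1.Node{ObjectMeta: metav1.ObjectMeta{
		Name:        "worker-0", // sample data
		Annotations: map[string]string{"constellation.edgeless.systems/node-image": "v2.6.0"}, // sample data
	}}
	node.Status.NodeInfo.KubeletVersion = "v1.26.1" // sample data

	status, err := cliK8s.ClusterStatus(context.Background(), staticNodeClient{nodes: []corev1.Node{node}})
	if err != nil {
		panic(err)
	}
	for name, s := range status {
		fmt.Printf("%s: image %s, kubelet %s\n", name, s.ImageVersion(), s.KubeletVersion())
	}
}
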
cli/internal/kubernetes/upgrade.go (new file, 420 lines)
@@ -0,0 +1,420 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package kubernetes

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/edgelesssys/constellation/v2/cli/internal/helm"
	"github.com/edgelesssys/constellation/v2/cli/internal/image"
	"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
	"github.com/edgelesssys/constellation/v2/internal/compatibility"
	"github.com/edgelesssys/constellation/v2/internal/config"
	"github.com/edgelesssys/constellation/v2/internal/constants"
	internalk8s "github.com/edgelesssys/constellation/v2/internal/kubernetes"
	"github.com/edgelesssys/constellation/v2/internal/kubernetes/kubectl"
	"github.com/edgelesssys/constellation/v2/internal/versions"
	"github.com/edgelesssys/constellation/v2/internal/versions/components"
	"github.com/edgelesssys/constellation/v2/internal/versionsapi"
	updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// ErrInProgress signals that an upgrade is in progress inside the cluster.
var ErrInProgress = errors.New("upgrade in progress")

// GetConstellationVersion queries the constellation-version object for a given field.
func GetConstellationVersion(ctx context.Context, client DynamicInterface) (updatev1alpha1.NodeVersion, error) {
	raw, err := client.GetCurrent(ctx, "constellation-version")
	if err != nil {
		return updatev1alpha1.NodeVersion{}, err
	}
	var nodeVersion updatev1alpha1.NodeVersion
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(raw.UnstructuredContent(), &nodeVersion); err != nil {
		return updatev1alpha1.NodeVersion{}, fmt.Errorf("converting unstructured to NodeVersion: %w", err)
	}

	return nodeVersion, nil
}

// Upgrader handles upgrading the cluster's components using the CLI.
type Upgrader struct {
	stableInterface  stableInterface
	dynamicInterface DynamicInterface
	helmClient       helmInterface
	imageFetcher     imageFetcher
	outWriter        io.Writer
	log              debugLog
}

// NewUpgrader returns a new Upgrader.
func NewUpgrader(outWriter io.Writer, log debugLog) (*Upgrader, error) {
	kubeConfig, err := clientcmd.BuildConfigFromFlags("", constants.AdminConfFilename)
	if err != nil {
		return nil, fmt.Errorf("building kubernetes config: %w", err)
	}

	kubeClient, err := kubernetes.NewForConfig(kubeConfig)
	if err != nil {
		return nil, fmt.Errorf("setting up kubernetes client: %w", err)
	}

	// use unstructured client to avoid importing the operator packages
	unstructuredClient, err := dynamic.NewForConfig(kubeConfig)
	if err != nil {
		return nil, fmt.Errorf("setting up custom resource client: %w", err)
	}

	helmClient, err := helm.NewClient(kubectl.New(), constants.AdminConfFilename, constants.HelmNamespace, log)
	if err != nil {
		return nil, fmt.Errorf("setting up helm client: %w", err)
	}

	return &Upgrader{
		stableInterface:  &stableClient{client: kubeClient},
		dynamicInterface: &NodeVersionClient{client: unstructuredClient},
		helmClient:       helmClient,
		imageFetcher:     image.New(),
		outWriter:        outWriter,
		log:              log,
	}, nil
}

// UpgradeHelmServices upgrades helm services.
func (u *Upgrader) UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error {
	return u.helmClient.Upgrade(ctx, config, timeout, allowDestructive)
}

// UpgradeNodeVersion upgrades the cluster's NodeVersion object and in turn triggers image & k8s version upgrades.
// The versions set in the config are validated against the versions running in the cluster.
func (u *Upgrader) UpgradeNodeVersion(ctx context.Context, conf *config.Config) error {
	imageReference, err := u.imageFetcher.FetchReference(ctx, conf)
	if err != nil {
		return fmt.Errorf("fetching image reference: %w", err)
	}

	imageVersion, err := versionsapi.NewVersionFromShortPath(conf.Image, versionsapi.VersionKindImage)
	if err != nil {
		return fmt.Errorf("parsing version from image short path: %w", err)
	}

	currentK8sVersion, err := versions.NewValidK8sVersion(conf.KubernetesVersion)
	if err != nil {
		return fmt.Errorf("getting Kubernetes version: %w", err)
	}
	versionConfig := versions.VersionConfigs[currentK8sVersion]

	nodeVersion, err := u.checkClusterStatus(ctx)
	if err != nil {
		return err
	}

	upgradeErrs := []error{}
	upgradeErr := &compatibility.InvalidUpgradeError{}
	err = u.updateImage(&nodeVersion, imageReference, imageVersion.Version)
	switch {
	case errors.As(err, &upgradeErr):
		upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping image upgrades: %w", err))
	case err != nil:
		return fmt.Errorf("updating image version: %w", err)
	}

	components, err := u.updateK8s(&nodeVersion, versionConfig.ClusterVersion, versionConfig.KubernetesComponents)
	switch {
	case err == nil:
		err := u.applyComponentsCM(ctx, components)
		if err != nil {
			return fmt.Errorf("applying k8s components ConfigMap: %w", err)
		}
	case errors.As(err, &upgradeErr):
		upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping Kubernetes upgrades: %w", err))
	default:
		return fmt.Errorf("updating Kubernetes version: %w", err)
	}

	if len(upgradeErrs) == 2 {
		return errors.Join(upgradeErrs...)
	}

	if err := u.UpdateMeasurements(ctx, conf.GetMeasurements()); err != nil {
		return fmt.Errorf("updating measurements: %w", err)
	}

	updatedNodeVersion, err := u.applyNodeVersion(ctx, nodeVersion)
	if err != nil {
		return fmt.Errorf("applying upgrade: %w", err)
	}
	switch {
	case updatedNodeVersion.Spec.ImageReference != imageReference:
		return fmt.Errorf("expected NodeVersion to contain %s, got %s", imageReference, updatedNodeVersion.Spec.ImageReference)
	case updatedNodeVersion.Spec.ImageVersion != imageVersion.Version:
		return fmt.Errorf("expected NodeVersion to contain %s, got %s", imageVersion.Version, updatedNodeVersion.Spec.ImageVersion)
	case updatedNodeVersion.Spec.KubernetesComponentsReference != nodeVersion.Spec.KubernetesComponentsReference:
		return fmt.Errorf("expected NodeVersion to contain %s, got %s", nodeVersion.Spec.KubernetesComponentsReference, updatedNodeVersion.Spec.KubernetesComponentsReference)
	case updatedNodeVersion.Spec.KubernetesClusterVersion != versionConfig.ClusterVersion:
		return fmt.Errorf("expected NodeVersion to contain %s, got %s", versionConfig.ClusterVersion, updatedNodeVersion.Spec.KubernetesClusterVersion)
	}

	return errors.Join(upgradeErrs...)
}

// KubernetesVersion returns the version of Kubernetes the Constellation is currently running on.
func (u *Upgrader) KubernetesVersion() (string, error) {
	return u.stableInterface.kubernetesVersion()
}

// CurrentImage returns the currently used image version of the cluster.
func (u *Upgrader) CurrentImage(ctx context.Context) (string, error) {
	nodeVersion, err := GetConstellationVersion(ctx, u.dynamicInterface)
	if err != nil {
		return "", fmt.Errorf("getting constellation-version: %w", err)
	}
	return nodeVersion.Spec.ImageVersion, nil
}

// CurrentKubernetesVersion returns the currently used Kubernetes version.
func (u *Upgrader) CurrentKubernetesVersion(ctx context.Context) (string, error) {
	nodeVersion, err := GetConstellationVersion(ctx, u.dynamicInterface)
	if err != nil {
		return "", fmt.Errorf("getting constellation-version: %w", err)
	}
	return nodeVersion.Spec.KubernetesClusterVersion, nil
}

// UpdateMeasurements fetches the cluster's measurements, compares them to a set of new measurements
// and updates the cluster's measurements if they are different from the new ones.
func (u *Upgrader) UpdateMeasurements(ctx context.Context, newMeasurements measurements.M) error {
	currentMeasurements, existingConf, err := u.GetClusterMeasurements(ctx)
	if err != nil {
		return fmt.Errorf("getting cluster measurements: %w", err)
	}
	if currentMeasurements.EqualTo(newMeasurements) {
		fmt.Fprintln(u.outWriter, "Cluster is already using the chosen measurements, skipping measurements upgrade")
		return nil
	}

	// backup of previous measurements
	existingConf.Data["oldMeasurements"] = existingConf.Data[constants.MeasurementsFilename]

	measurementsJSON, err := json.Marshal(newMeasurements)
	if err != nil {
		return fmt.Errorf("marshaling measurements: %w", err)
	}
	existingConf.Data[constants.MeasurementsFilename] = string(measurementsJSON)
	u.log.Debugf("Triggering measurements config map update now")
	if _, err = u.stableInterface.updateConfigMap(ctx, existingConf); err != nil {
		return fmt.Errorf("setting new measurements: %w", err)
	}

	fmt.Fprintln(u.outWriter, "Successfully updated the cluster's expected measurements")
	return nil
}

// GetClusterMeasurements fetches the join-config configmap from the cluster, extracts the measurements
// and returns both the full configmap and the measurements.
func (u *Upgrader) GetClusterMeasurements(ctx context.Context) (measurements.M, *corev1.ConfigMap, error) {
	existingConf, err := u.stableInterface.getCurrentConfigMap(ctx, constants.JoinConfigMap)
	if err != nil {
		return measurements.M{}, &corev1.ConfigMap{}, fmt.Errorf("retrieving current measurements: %w", err)
	}
	if _, ok := existingConf.Data[constants.MeasurementsFilename]; !ok {
		return measurements.M{}, &corev1.ConfigMap{}, errors.New("measurements missing from join-config")
	}
	var currentMeasurements measurements.M
	if err := json.Unmarshal([]byte(existingConf.Data[constants.MeasurementsFilename]), &currentMeasurements); err != nil {
		return measurements.M{}, &corev1.ConfigMap{}, fmt.Errorf("retrieving current measurements: %w", err)
	}

	return currentMeasurements, existingConf, nil
}

// applyComponentsCM applies the k8s components ConfigMap to the cluster.
func (u *Upgrader) applyComponentsCM(ctx context.Context, components *corev1.ConfigMap) error {
	_, err := u.stableInterface.createConfigMap(ctx, components)
	// If the map already exists we can use that map and assume it has the same content as 'configMap'.
	if err != nil && !k8serrors.IsAlreadyExists(err) {
		return fmt.Errorf("creating k8s-components ConfigMap: %w. %T", err, err)
	}
	return nil
}

func (u *Upgrader) applyNodeVersion(ctx context.Context, nodeVersion updatev1alpha1.NodeVersion) (updatev1alpha1.NodeVersion, error) {
	raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
	if err != nil {
		return updatev1alpha1.NodeVersion{}, fmt.Errorf("converting nodeVersion to unstructured: %w", err)
	}
	u.log.Debugf("Triggering NodeVersion upgrade now")
	// Send the updated NodeVersion resource
	updated, err := u.dynamicInterface.Update(ctx, &unstructured.Unstructured{Object: raw})
	if err != nil {
		return updatev1alpha1.NodeVersion{}, fmt.Errorf("updating NodeVersion: %w", err)
	}

	var updatedNodeVersion updatev1alpha1.NodeVersion
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(updated.UnstructuredContent(), &updatedNodeVersion); err != nil {
		return updatev1alpha1.NodeVersion{}, fmt.Errorf("converting unstructured to NodeVersion: %w", err)
	}

	return updatedNodeVersion, nil
}

func (u *Upgrader) checkClusterStatus(ctx context.Context) (updatev1alpha1.NodeVersion, error) {
	nodeVersion, err := GetConstellationVersion(ctx, u.dynamicInterface)
	if err != nil {
		return updatev1alpha1.NodeVersion{}, fmt.Errorf("retrieving current image: %w", err)
	}

	if upgradeInProgress(nodeVersion) {
		return updatev1alpha1.NodeVersion{}, ErrInProgress
	}

	return nodeVersion, nil
}

// updateImage upgrades the cluster to the given measurements and image.
func (u *Upgrader) updateImage(nodeVersion *updatev1alpha1.NodeVersion, newImageReference, newImageVersion string) error {
	currentImageVersion := nodeVersion.Spec.ImageVersion

	if err := compatibility.IsValidUpgrade(currentImageVersion, newImageVersion); err != nil {
		return err
	}

	u.log.Debugf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, newImageVersion)
	nodeVersion.Spec.ImageReference = newImageReference
	nodeVersion.Spec.ImageVersion = newImageVersion

	return nil
}

func (u *Upgrader) updateK8s(nodeVersion *updatev1alpha1.NodeVersion, newClusterVersion string, components components.Components) (*corev1.ConfigMap, error) {
	configMap, err := internalk8s.ConstructK8sComponentsCM(components, newClusterVersion)
	if err != nil {
		return nil, fmt.Errorf("constructing k8s-components ConfigMap: %w", err)
	}

	if err := compatibility.IsValidUpgrade(nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion); err != nil {
		return nil, err
	}

	u.log.Debugf("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)
	nodeVersion.Spec.KubernetesComponentsReference = configMap.ObjectMeta.Name
	nodeVersion.Spec.KubernetesClusterVersion = newClusterVersion

	return &configMap, nil
}

// NodeVersionClient implements the DynamicInterface interface to interact with NodeVersion objects.
type NodeVersionClient struct {
	client dynamic.Interface
}

// NewNodeVersionClient returns a new NodeVersionClient.
func NewNodeVersionClient(client dynamic.Interface) *NodeVersionClient {
	return &NodeVersionClient{client: client}
}

// GetCurrent returns the current NodeVersion object.
func (u *NodeVersionClient) GetCurrent(ctx context.Context, name string) (*unstructured.Unstructured, error) {
	return u.client.Resource(schema.GroupVersionResource{
		Group:    "update.edgeless.systems",
		Version:  "v1alpha1",
		Resource: "nodeversions",
	}).Get(ctx, name, metav1.GetOptions{})
}

// Update updates the NodeVersion object.
func (u *NodeVersionClient) Update(ctx context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	return u.client.Resource(schema.GroupVersionResource{
		Group:    "update.edgeless.systems",
		Version:  "v1alpha1",
		Resource: "nodeversions",
	}).Update(ctx, obj, metav1.UpdateOptions{})
}

// DynamicInterface is a general interface to query custom resources.
type DynamicInterface interface {
	GetCurrent(ctx context.Context, name string) (*unstructured.Unstructured, error)
	Update(ctx context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error)
}

// upgradeInProgress checks if an upgrade is in progress.
// Returns true with errors as it's the "safer" response. If the caller does not check err, they at least won't update the cluster.
func upgradeInProgress(nodeVersion updatev1alpha1.NodeVersion) bool {
	conditions := nodeVersion.Status.Conditions
	activeUpgrade := nodeVersion.Status.ActiveClusterVersionUpgrade

	if activeUpgrade {
		return true
	}

	for _, condition := range conditions {
		if condition.Type == updatev1alpha1.ConditionOutdated && condition.Status == metav1.ConditionTrue {
			return true
		}
	}
	return false
}

type stableInterface interface {
	getCurrentConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error)
	updateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error)
	createConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error)
	kubernetesVersion() (string, error)
}

type stableClient struct {
	client kubernetes.Interface
}

// getCurrentConfigMap returns a ConfigMap given its name.
func (u *stableClient) getCurrentConfigMap(ctx context.Context, name string) (*corev1.ConfigMap, error) {
	return u.client.CoreV1().ConfigMaps(constants.ConstellationNamespace).Get(ctx, name, metav1.GetOptions{})
}

// updateConfigMap updates the given ConfigMap.
func (u *stableClient) updateConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
	return u.client.CoreV1().ConfigMaps(constants.ConstellationNamespace).Update(ctx, configMap, metav1.UpdateOptions{})
}

func (u *stableClient) createConfigMap(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
	return u.client.CoreV1().ConfigMaps(constants.ConstellationNamespace).Create(ctx, configMap, metav1.CreateOptions{})
}

func (u *stableClient) kubernetesVersion() (string, error) {
	serverVersion, err := u.client.Discovery().ServerVersion()
	if err != nil {
		return "", err
	}
	return serverVersion.GitVersion, nil
}

type helmInterface interface {
	Upgrade(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool) error
}

type debugLog interface {
	Debugf(format string, args ...any)
	Sync()
}

// imageFetcher gets an image reference from the versionsapi.
type imageFetcher interface {
	FetchReference(ctx context.Context, config *config.Config) (string, error)
}
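
Aside (illustrative, not part of the diff): a sketch of how a command could wire the Upgrader end to end. NewUpgrader reads the admin kubeconfig at constants.AdminConfFilename, so this only runs from a workspace where that file exists; stdLogger is a stand-in for the CLI's real debug logger (any type with Debugf and Sync satisfies the unexported debugLog interface), and config.Default() stands in for loading the user's config file.

package main

import (
	"context"
	"errors"
	"fmt"
	"os"

	cliK8s "github.com/edgelesssys/constellation/v2/cli/internal/kubernetes"
	"github.com/edgelesssys/constellation/v2/internal/config"
)

// stdLogger satisfies the package's unexported debugLog interface.
type stdLogger struct{}

func (stdLogger) Debugf(format string, args ...any) { fmt.Printf(format+"\n", args...) }
func (stdLogger) Sync()                             {}

func main() {
	upgrader, err := cliK8s.NewUpgrader(os.Stdout, stdLogger{})
	if err != nil {
		panic(err)
	}

	// The real CLI loads and validates the user's config file instead.
	conf := config.Default()

	err = upgrader.UpgradeNodeVersion(context.Background(), conf)
	switch {
	case errors.Is(err, cliK8s.ErrInProgress):
		fmt.Println("node version upgrade already in progress, try again later")
	case err != nil:
		panic(err)
	}
}
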
cli/internal/kubernetes/upgrade_test.go (new file, 479 lines)
@@ -0,0 +1,479 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package kubernetes

import (
	"context"
	"encoding/json"
	"errors"
	"io"
	"testing"

	"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
	"github.com/edgelesssys/constellation/v2/internal/compatibility"
	"github.com/edgelesssys/constellation/v2/internal/config"
	"github.com/edgelesssys/constellation/v2/internal/constants"
	"github.com/edgelesssys/constellation/v2/internal/logger"
	"github.com/edgelesssys/constellation/v2/internal/versions"
	"github.com/edgelesssys/constellation/v2/internal/versions/components"
	updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

func TestUpgradeNodeVersion(t *testing.T) {
	someErr := errors.New("some error")
	testCases := map[string]struct {
		stable                *stubStableClient
		conditions            []metav1.Condition
		currentImageVersion   string
		currentClusterVersion string
		conf                  *config.Config
		getErr                error
		wantErr               bool
		wantUpdate            bool
		assertCorrectError    func(t *testing.T, err error) bool
	}{
		"success": {
			conf: func() *config.Config {
				conf := config.Default()
				conf.Image = "v1.2.3"
				conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
				return conf
			}(),
			currentImageVersion:   "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable: &stubStableClient{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
				},
			},
			wantUpdate: true,
		},
		"only k8s upgrade": {
			conf: func() *config.Config {
				conf := config.Default()
				conf.Image = "v1.2.2"
				conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
				return conf
			}(),
			currentImageVersion:   "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable: &stubStableClient{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
				},
			},
			wantUpdate: true,
			wantErr:    true,
			assertCorrectError: func(t *testing.T, err error) bool {
				upgradeErr := &compatibility.InvalidUpgradeError{}
				return assert.ErrorAs(t, err, &upgradeErr)
			},
		},
		"only image upgrade": {
			conf: func() *config.Config {
				conf := config.Default()
				conf.Image = "v1.2.3"
				conf.KubernetesVersion = versions.SupportedK8sVersions()[0]
				return conf
			}(),
			currentImageVersion:   "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable: &stubStableClient{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
				},
			},
			wantUpdate: true,
			wantErr:    true,
			assertCorrectError: func(t *testing.T, err error) bool {
				upgradeErr := &compatibility.InvalidUpgradeError{}
				return assert.ErrorAs(t, err, &upgradeErr)
			},
		},
		"not an upgrade": {
			conf: func() *config.Config {
				conf := config.Default()
				conf.Image = "v1.2.2"
				conf.KubernetesVersion = versions.SupportedK8sVersions()[0]
				return conf
			}(),
			currentImageVersion:   "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable:                &stubStableClient{},
			wantErr:               true,
			assertCorrectError: func(t *testing.T, err error) bool {
				upgradeErr := &compatibility.InvalidUpgradeError{}
				return assert.ErrorAs(t, err, &upgradeErr)
			},
		},
		"upgrade in progress": {
			conf: func() *config.Config {
				conf := config.Default()
				conf.Image = "v1.2.3"
				conf.KubernetesVersion = versions.SupportedK8sVersions()[1]
				return conf
			}(),
			conditions: []metav1.Condition{{
				Type:   updatev1alpha1.ConditionOutdated,
				Status: metav1.ConditionTrue,
			}},
			currentImageVersion:   "v1.2.2",
			currentClusterVersion: versions.SupportedK8sVersions()[0],
			stable:                &stubStableClient{},
			wantErr:               true,
			assertCorrectError: func(t *testing.T, err error) bool {
				return assert.ErrorIs(t, err, ErrInProgress)
			},
		},
		"get error": {
			conf: func() *config.Config {
				conf := config.Default()
				conf.Image = "v1.2.3"
				return conf
			}(),
			getErr:  someErr,
			wantErr: true,
			assertCorrectError: func(t *testing.T, err error) bool {
				return assert.ErrorIs(t, err, someErr)
			},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			nodeVersion := updatev1alpha1.NodeVersion{
				Spec: updatev1alpha1.NodeVersionSpec{
					ImageVersion:             tc.currentImageVersion,
					KubernetesClusterVersion: tc.currentClusterVersion,
				},
				Status: updatev1alpha1.NodeVersionStatus{
					Conditions: tc.conditions,
				},
			}

			unstrNodeVersion, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nodeVersion)
			require.NoError(err)

			dynamicClient := &stubDynamicClient{object: &unstructured.Unstructured{Object: unstrNodeVersion}, getErr: tc.getErr}
			upgrader := Upgrader{
				stableInterface:  tc.stable,
				dynamicInterface: dynamicClient,
				imageFetcher:     &stubImageFetcher{},
				log:              logger.NewTest(t),
				outWriter:        io.Discard,
			}

			err = upgrader.UpgradeNodeVersion(context.Background(), tc.conf)

			// Check upgrades first because if we checked err first, UpgradeImage may error due to other reasons and still trigger an upgrade.
			if tc.wantUpdate {
				assert.NotNil(dynamicClient.updatedObject)
			} else {
				assert.Nil(dynamicClient.updatedObject)
			}

			if tc.wantErr {
				assert.Error(err)
				tc.assertCorrectError(t, err)
				return
			}
			assert.NoError(err)
		})
	}
}

func TestUpdateMeasurements(t *testing.T) {
	someErr := errors.New("error")
	testCases := map[string]struct {
		updater         *stubStableClient
		newMeasurements measurements.M
		wantUpdate      bool
		wantErr         bool
	}{
		"success": {
			updater: &stubStableClient{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
				},
			},
			newMeasurements: measurements.M{
				0: measurements.WithAllBytes(0xBB, measurements.Enforce),
			},
			wantUpdate: true,
		},
		"measurements are the same": {
			updater: &stubStableClient{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
				},
			},
			newMeasurements: measurements.M{
				0: measurements.WithAllBytes(0xAA, measurements.Enforce),
			},
		},
		"setting warnOnly to true is allowed": {
			updater: &stubStableClient{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
				},
			},
			newMeasurements: measurements.M{
				0: measurements.WithAllBytes(0xAA, measurements.WarnOnly),
			},
			wantUpdate: true,
		},
		"setting warnOnly to false is allowed": {
			updater: &stubStableClient{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":true}}`),
				},
			},
			newMeasurements: measurements.M{
				0: measurements.WithAllBytes(0xAA, measurements.Enforce),
			},
			wantUpdate: true,
		},
		"getCurrent error": {
			updater: &stubStableClient{getErr: someErr},
			wantErr: true,
		},
		"update error": {
			updater: &stubStableClient{
				configMaps: map[string]*corev1.ConfigMap{
					constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
				},
				updateErr: someErr,
			},
			wantErr: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			upgrader := &Upgrader{
				stableInterface: tc.updater,
				outWriter:       io.Discard,
				log:             logger.NewTest(t),
			}

			err := upgrader.UpdateMeasurements(context.Background(), tc.newMeasurements)
			if tc.wantErr {
				assert.Error(err)
				return
			}

			assert.NoError(err)
			if tc.wantUpdate {
				newMeasurementsJSON, err := json.Marshal(tc.newMeasurements)
				require.NoError(t, err)
				assert.JSONEq(string(newMeasurementsJSON), tc.updater.updatedConfigMaps[constants.JoinConfigMap].Data[constants.MeasurementsFilename])
			} else {
				assert.Nil(tc.updater.updatedConfigMaps)
			}
		})
	}
}

func TestUpdateImage(t *testing.T) {
	someErr := errors.New("error")
	testCases := map[string]struct {
		newImageReference string
		newImageVersion   string
		oldImageReference string
		oldImageVersion   string
		updateErr         error
		wantUpdate        bool
		wantErr           bool
	}{
		"success": {
			oldImageReference: "old-image-ref",
			oldImageVersion:   "v0.0.0",
			newImageReference: "new-image-ref",
			newImageVersion:   "v0.1.0",
			wantUpdate:        true,
		},
		"same version fails": {
			oldImageVersion: "v0.0.0",
			newImageVersion: "v0.0.0",
			wantErr:         true,
		},
		"update error": {
			updateErr: someErr,
			wantErr:   true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			upgrader := &Upgrader{
				log: logger.NewTest(t),
			}

			nodeVersion := updatev1alpha1.NodeVersion{
				Spec: updatev1alpha1.NodeVersionSpec{
					ImageReference: tc.oldImageReference,
					ImageVersion:   tc.oldImageVersion,
				},
			}

			err := upgrader.updateImage(&nodeVersion, tc.newImageReference, tc.newImageVersion)

			if tc.wantErr {
				assert.Error(err)
				return
			}

			assert.NoError(err)
			if tc.wantUpdate {
				assert.Equal(tc.newImageReference, nodeVersion.Spec.ImageReference)
				assert.Equal(tc.newImageVersion, nodeVersion.Spec.ImageVersion)
			} else {
				assert.Equal(tc.oldImageReference, nodeVersion.Spec.ImageReference)
				assert.Equal(tc.oldImageVersion, nodeVersion.Spec.ImageVersion)
			}
		})
	}
}

func TestUpdateK8s(t *testing.T) {
	someErr := errors.New("error")
	testCases := map[string]struct {
		newClusterVersion string
		oldClusterVersion string
		updateErr         error
		wantUpdate        bool
		wantErr           bool
	}{
		"success": {
			oldClusterVersion: "v0.0.0",
			newClusterVersion: "v0.1.0",
			wantUpdate:        true,
		},
		"same version fails": {
			oldClusterVersion: "v0.0.0",
			newClusterVersion: "v0.0.0",
			wantErr:           true,
		},
		"update error": {
			updateErr: someErr,
			wantErr:   true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			upgrader := &Upgrader{
				log: logger.NewTest(t),
			}

			nodeVersion := updatev1alpha1.NodeVersion{
				Spec: updatev1alpha1.NodeVersionSpec{
					KubernetesClusterVersion: tc.oldClusterVersion,
				},
			}

			_, err := upgrader.updateK8s(&nodeVersion, tc.newClusterVersion, components.Components{})

			if tc.wantErr {
				assert.Error(err)
				return
			}

			assert.NoError(err)
			if tc.wantUpdate {
				assert.Equal(tc.newClusterVersion, nodeVersion.Spec.KubernetesClusterVersion)
			} else {
				assert.Equal(tc.oldClusterVersion, nodeVersion.Spec.KubernetesClusterVersion)
			}
		})
	}
}

func newJoinConfigMap(data string) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: constants.JoinConfigMap,
		},
		Data: map[string]string{
			constants.MeasurementsFilename: data,
		},
	}
}

type stubDynamicClient struct {
	object        *unstructured.Unstructured
	updatedObject *unstructured.Unstructured
	getErr        error
	updateErr     error
}

func (u *stubDynamicClient) GetCurrent(_ context.Context, _ string) (*unstructured.Unstructured, error) {
	return u.object, u.getErr
}

func (u *stubDynamicClient) Update(_ context.Context, updatedObject *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	u.updatedObject = updatedObject
	return u.updatedObject, u.updateErr
}

type stubStableClient struct {
	configMaps        map[string]*corev1.ConfigMap
	updatedConfigMaps map[string]*corev1.ConfigMap
	k8sVersion        string
	getErr            error
	updateErr         error
	createErr         error
	k8sErr            error
}

func (s *stubStableClient) getCurrentConfigMap(_ context.Context, name string) (*corev1.ConfigMap, error) {
	return s.configMaps[name], s.getErr
}

func (s *stubStableClient) updateConfigMap(_ context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
	if s.updatedConfigMaps == nil {
		s.updatedConfigMaps = map[string]*corev1.ConfigMap{}
	}
	s.updatedConfigMaps[configMap.ObjectMeta.Name] = configMap
	return s.updatedConfigMaps[configMap.ObjectMeta.Name], s.updateErr
}

func (s *stubStableClient) createConfigMap(_ context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) {
	if s.configMaps == nil {
		s.configMaps = map[string]*corev1.ConfigMap{}
	}
	s.configMaps[configMap.ObjectMeta.Name] = configMap
	return s.configMaps[configMap.ObjectMeta.Name], s.createErr
}

func (s *stubStableClient) kubernetesVersion() (string, error) {
	return s.k8sVersion, s.k8sErr
}

type stubImageFetcher struct {
	reference         string
	fetchReferenceErr error
}

func (f *stubImageFetcher) FetchReference(_ context.Context, _ *config.Config) (string, error) {
	return f.reference, f.fetchReferenceErr
}
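
Aside (illustrative, not part of the diff): the tests above assert on *compatibility.InvalidUpgradeError via errors.As, mirroring how UpgradeNodeVersion collects per-component "skip" errors and only aborts when both the image and the Kubernetes upgrade were skipped. The same pattern, reduced to a self-contained sketch with a local stand-in for the error type:

package main

import (
	"errors"
	"fmt"
)

// invalidUpgradeError is a local stand-in for compatibility.InvalidUpgradeError.
type invalidUpgradeError struct{ from, to string }

func (e *invalidUpgradeError) Error() string {
	return fmt.Sprintf("upgrading from %s to %s is not a valid upgrade", e.from, e.to)
}

func main() {
	// Each component upgrade may be skipped (invalid upgrade) without aborting
	// the whole operation; only when every component was skipped do the
	// collected errors surface together via errors.Join.
	var skipped []error

	steps := []error{
		fmt.Errorf("skipping image upgrades: %w", &invalidUpgradeError{from: "v1.2.2", to: "v1.2.2"}),
		nil, // the Kubernetes upgrade succeeded
	}
	for _, err := range steps {
		target := &invalidUpgradeError{}
		if errors.As(err, &target) {
			skipped = append(skipped, err)
		}
	}

	if len(skipped) == len(steps) {
		fmt.Println("nothing to do:", errors.Join(skipped...))
		return
	}
	fmt.Printf("%d component upgrade(s) skipped, continuing\n", len(skipped))
}
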