cli: install cilium in cli instead of bootstrapper (#2146)

* add wait and restartDS

* cilium working (tested on azure + gcp)

* clean helm code from bootstrapper

* fixup! clean helm code from bootstrapper

* fixup! clean helm code from bootstrapper

* fixup! clean helm code from bootstrapper

* add patchnode for gcp

* fix gcp

* patch node inside bootstrapper

* apply renaming of client

* fixup! apply renaming of client

* otto feedback
Adrian Stobbe 2023-08-02 15:49:40 +02:00 committed by GitHub
parent da1376cd90
commit 13eea1ca31
36 changed files with 519 additions and 575 deletions


@@ -5,12 +5,14 @@ go_library(
name = "helm",
srcs = [
"backup.go",
"client.go",
"ciliumhelper.go",
"helm.go",
"helminstaller.go",
"init.go",
"install.go",
"loader.go",
"release.go",
"serviceversion.go",
"setup.go",
"upgrade.go",
"values.go",
],
embedsrcs = [
@@ -427,17 +429,21 @@ go_library(
"//internal/compatibility",
"//internal/config",
"//internal/constants",
"//internal/deploy/helm",
"//internal/file",
"//internal/kms/uri",
"//internal/retry",
"//internal/semver",
"//internal/versions",
"@com_github_pkg_errors//:errors",
"@com_github_spf13_afero//:afero",
"@io_k8s_apiextensions_apiserver//pkg/apis/apiextensions/v1:apiextensions",
"@io_k8s_apimachinery//pkg/api/errors",
"@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
"@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured",
"@io_k8s_apimachinery//pkg/runtime/schema",
"@io_k8s_apimachinery//pkg/util/wait",
"@io_k8s_client_go//kubernetes",
"@io_k8s_client_go//tools/clientcmd",
"@io_k8s_sigs_yaml//:yaml",
"@sh_helm_helm//pkg/ignore",
"@sh_helm_helm_v3//pkg/action",
@@ -453,8 +459,9 @@ go_test(
name = "helm_test",
srcs = [
"backup_test.go",
"client_test.go",
"helm_test.go",
"loader_test.go",
"upgrade_test.go",
],
data = glob(["testdata/**"]),
embed = [":helm"],
@@ -465,7 +472,6 @@ go_test(
"//internal/cloud/cloudprovider",
"//internal/compatibility",
"//internal/config",
"//internal/deploy/helm",
"//internal/file",
"//internal/logger",
"//internal/semver",


@@ -18,7 +18,7 @@ import (
"sigs.k8s.io/yaml"
)
func (c *Client) backupCRDs(ctx context.Context, upgradeID string) ([]apiextensionsv1.CustomResourceDefinition, error) {
func (c *UpgradeClient) backupCRDs(ctx context.Context, upgradeID string) ([]apiextensionsv1.CustomResourceDefinition, error) {
c.log.Debugf("Starting CRD backup")
crds, err := c.kubectl.GetCRDs(ctx)
if err != nil {
@@ -53,7 +53,7 @@ func (c *Client) backupCRDs(ctx context.Context, upgradeID string) ([]apiextensi
return crds, nil
}
func (c *Client) backupCRs(ctx context.Context, crds []apiextensionsv1.CustomResourceDefinition, upgradeID string) error {
func (c *UpgradeClient) backupCRs(ctx context.Context, crds []apiextensionsv1.CustomResourceDefinition, upgradeID string) error {
c.log.Debugf("Starting CR backup")
for _, crd := range crds {
c.log.Debugf("Creating backup for resource type: %s", crd.Name)
@@ -99,10 +99,10 @@ func (c *Client) backupCRs(ctx context.Context, crds []apiextensionsv1.CustomRes
return nil
}
func (c *Client) backupFolder(upgradeID string) string {
func (c *UpgradeClient) backupFolder(upgradeID string) string {
return filepath.Join(constants.UpgradeDir, upgradeID, "backups") + string(filepath.Separator)
}
func (c *Client) crdBackupFolder(upgradeID string) string {
func (c *UpgradeClient) crdBackupFolder(upgradeID string) string {
return filepath.Join(c.backupFolder(upgradeID), "crds") + string(filepath.Separator)
}


@@ -52,7 +52,7 @@ func TestBackupCRDs(t *testing.T) {
crd := apiextensionsv1.CustomResourceDefinition{}
err := yaml.Unmarshal([]byte(tc.crd), &crd)
require.NoError(err)
client := Client{
client := UpgradeClient{
config: nil,
kubectl: stubCrdClient{crds: []apiextensionsv1.CustomResourceDefinition{crd}, getCRDsError: tc.getCRDsError},
fs: file.NewHandler(memFs),
@@ -143,7 +143,7 @@ func TestBackupCRs(t *testing.T) {
require := require.New(t)
memFs := afero.NewMemMapFs()
client := Client{
client := UpgradeClient{
config: nil,
kubectl: stubCrdClient{crs: []unstructured.Unstructured{tc.resource}, getCRsError: tc.getCRsError},
fs: file.NewHandler(memFs),


@@ -0,0 +1,72 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package helm
import (
"context"
"fmt"
"time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
type k8sDsClient struct {
clientset *kubernetes.Clientset
}
func newK8sCiliumHelper(kubeconfigPath string) (*k8sDsClient, error) {
config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
if err != nil {
return nil, err
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
return &k8sDsClient{clientset: clientset}, nil
}
// WaitForDS waits for a DaemonSet to become ready.
func (h *k8sDsClient) WaitForDS(ctx context.Context, namespace, name string, log debugLog) error {
for {
select {
case <-ctx.Done():
return fmt.Errorf("context expired before DaemonSet %q became ready", name)
default:
ds, err := h.clientset.AppsV1().DaemonSets(namespace).Get(ctx, name, v1.GetOptions{})
if err != nil {
return err
}
if ds.Status.NumberReady == ds.Status.DesiredNumberScheduled {
log.Debugf("DaemonSet %s is ready\n", name)
return nil
}
log.Debugf("Waiting for DaemonSet %s to become ready...\n", name)
time.Sleep(10 * time.Second)
}
}
}
// RestartDS restarts all pods of a DaemonSet by stamping a fresh timestamp
// annotation into its pod template, the same mechanism `kubectl rollout restart` uses.
func (h *k8sDsClient) RestartDS(namespace, name string) error {
ds, err := h.clientset.AppsV1().DaemonSets(namespace).Get(context.Background(), name, v1.GetOptions{})
if err != nil {
return err
}
// Guard against a nil annotation map to avoid a panic on templates without annotations.
if ds.Spec.Template.ObjectMeta.Annotations == nil {
ds.Spec.Template.ObjectMeta.Annotations = map[string]string{}
}
ds.Spec.Template.ObjectMeta.Annotations["restartTimestamp"] = fmt.Sprintf("%d", time.Now().Unix())
_, err = h.clientset.AppsV1().DaemonSets(namespace).Update(context.Background(), ds, v1.UpdateOptions{})
return err
}
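
For orientation, a minimal usage sketch of this helper (not part of the commit): the kubeconfig path and the 20-minute budget are assumptions, and log stands for any debugLog implementation from this package.

// Hedged sketch: wait for the Cilium DaemonSet with a bounded context,
// then trigger a rolling restart.
func waitAndRestartCilium(log debugLog) error {
    ctx, cancel := context.WithTimeout(context.Background(), 20*time.Minute)
    defer cancel()
    helper, err := newK8sCiliumHelper("admin.conf") // kubeconfig path is an assumption
    if err != nil {
        return err
    }
    if err := helper.WaitForDS(ctx, "kube-system", "cilium", log); err != nil {
        return err
    }
    return helper.RestartDS("kube-system", "cilium")
}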


@@ -15,3 +15,25 @@ It is used by the CLI to:
- create local backups before running service upgrades
*/
package helm
// mergeMaps returns a new map that is the merger of its inputs.
// Key collisions are resolved by taking the value of the second argument (map b).
// Taken from: https://github.com/helm/helm/blob/dbc6d8e20fe1d58d50e6ed30f09a04a77e4c68db/pkg/cli/values/options.go#L91-L108.
func mergeMaps(a, b map[string]any) map[string]any {
out := make(map[string]any, len(a))
for k, v := range a {
out[k] = v
}
for k, v := range b {
if v, ok := v.(map[string]any); ok {
if bv, ok := out[k]; ok {
if bv, ok := bv.(map[string]any); ok {
out[k] = mergeMaps(bv, v)
continue
}
}
}
out[k] = v
}
return out
}
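
In short: nested maps are merged recursively, and scalar collisions take the value from map b. A one-call illustration with hypothetical keys (the table-driven tests in the next file cover the semantics exhaustively):

// Illustration only; assumes fmt is imported.
func exampleMergeMaps() {
    merged := mergeMaps(
        map[string]any{"svc": map[string]any{"a": "x", "b": "y"}},
        map[string]any{"svc": map[string]any{"b": "z"}},
    )
    fmt.Printf("%v\n", merged) // map[svc:map[a:x b:z]]
}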


@@ -0,0 +1,109 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package helm
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestMergeMaps(t *testing.T) {
testCases := map[string]struct {
vals map[string]any
extraVals map[string]any
expected map[string]any
}{
"equal": {
vals: map[string]any{
"join-service": map[string]any{
"key1": "foo",
"key2": "bar",
},
},
extraVals: map[string]any{
"join-service": map[string]any{
"extraKey1": "extraFoo",
"extraKey2": "extraBar",
},
},
expected: map[string]any{
"join-service": map[string]any{
"key1": "foo",
"key2": "bar",
"extraKey1": "extraFoo",
"extraKey2": "extraBar",
},
},
},
"missing join-service extraVals": {
vals: map[string]any{
"join-service": map[string]any{
"key1": "foo",
"key2": "bar",
},
},
extraVals: map[string]any{
"extraKey1": "extraFoo",
"extraKey2": "extraBar",
},
expected: map[string]any{
"join-service": map[string]any{
"key1": "foo",
"key2": "bar",
},
"extraKey1": "extraFoo",
"extraKey2": "extraBar",
},
},
"missing join-service vals": {
vals: map[string]any{
"key1": "foo",
"key2": "bar",
},
extraVals: map[string]any{
"join-service": map[string]any{
"extraKey1": "extraFoo",
"extraKey2": "extraBar",
},
},
expected: map[string]any{
"key1": "foo",
"key2": "bar",
"join-service": map[string]any{
"extraKey1": "extraFoo",
"extraKey2": "extraBar",
},
},
},
"key collision": {
vals: map[string]any{
"join-service": map[string]any{
"key1": "foo",
},
},
extraVals: map[string]any{
"join-service": map[string]any{
"key1": "bar",
},
},
expected: map[string]any{
"join-service": map[string]any{
"key1": "bar",
},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
newVals := mergeMaps(tc.vals, tc.extraVals)
assert.Equal(tc.expected, newVals)
})
}
}


@@ -1,100 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package helm
import (
"context"
"fmt"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/cloud/openstack"
"github.com/edgelesssys/constellation/v2/internal/constants"
helminstaller "github.com/edgelesssys/constellation/v2/internal/deploy/helm"
"github.com/edgelesssys/constellation/v2/internal/kms/uri"
)
// SuiteInstaller installs all Helm charts required for a constellation cluster.
type SuiteInstaller interface {
Install(ctx context.Context, provider cloudprovider.Provider, masterSecret uri.MasterSecret,
idFile clusterid.File,
serviceAccURI string, releases *helminstaller.Releases,
) error
}
type helmInstallationClient struct {
log debugLog
installer helmInstaller
}
// NewInstallationClient creates a new Helm installation client to install all Helm charts required for a constellation cluster.
func NewInstallationClient(log debugLog) (SuiteInstaller, error) {
installer, err := helminstaller.NewInstaller(constants.AdminConfFilename, log)
if err != nil {
return nil, fmt.Errorf("creating Helm installer: %w", err)
}
return &helmInstallationClient{log: log, installer: installer}, nil
}
func (h helmInstallationClient) Install(ctx context.Context, provider cloudprovider.Provider, masterSecret uri.MasterSecret,
idFile clusterid.File,
serviceAccURI string, releases *helminstaller.Releases,
) error {
serviceVals, err := setupMicroserviceVals(ctx, provider, masterSecret.Salt, idFile.UID, serviceAccURI)
if err != nil {
return fmt.Errorf("setting up microservice values: %w", err)
}
if err := h.installer.InstallChartWithValues(ctx, releases.ConstellationServices, serviceVals); err != nil {
return fmt.Errorf("installing microservices: %w", err)
}
h.log.Debugf("Installing cert-manager")
if err := h.installer.InstallChart(ctx, releases.CertManager); err != nil {
return fmt.Errorf("installing cert-manager: %w", err)
}
if releases.CSI != nil {
var csiVals map[string]any
if provider == cloudprovider.OpenStack {
creds, err := openstack.AccountKeyFromURI(serviceAccURI)
if err != nil {
return err
}
cinderIni := creds.CloudINI().CinderCSIConfiguration()
csiVals = map[string]any{
"cinder-config": map[string]any{
"secretData": cinderIni,
},
}
}
h.log.Debugf("Installing CSI deployments")
if err := h.installer.InstallChartWithValues(ctx, *releases.CSI, csiVals); err != nil {
return fmt.Errorf("installing CSI snapshot CRDs: %w", err)
}
}
if releases.AWSLoadBalancerController != nil {
h.log.Debugf("Installing AWS Load Balancer Controller")
if err := h.installer.InstallChart(ctx, *releases.AWSLoadBalancerController); err != nil {
return fmt.Errorf("installing AWS Load Balancer Controller: %w", err)
}
}
h.log.Debugf("Installing constellation operators")
operatorVals := setupOperatorVals(ctx, idFile.UID)
if err := h.installer.InstallChartWithValues(ctx, releases.ConstellationOperators, operatorVals); err != nil {
return fmt.Errorf("installing constellation operators: %w", err)
}
// TODO(elchead): AB#3301 do cilium after version upgrade
return nil
}
type helmInstaller interface {
InstallChart(context.Context, helminstaller.Release) error
InstallChartWithValues(ctx context.Context, release helminstaller.Release, extraValues map[string]any) error
}

cli/internal/helm/init.go (new file, 258 lines)

@@ -0,0 +1,258 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package helm
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"time"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/cli/internal/terraform"
"github.com/edgelesssys/constellation/v2/internal/cloud/azureshared"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/cloud/gcpshared"
"github.com/edgelesssys/constellation/v2/internal/cloud/openstack"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/kms/uri"
)
// Initializer installs all Helm charts required for a constellation cluster.
type Initializer interface {
Install(ctx context.Context, provider cloudprovider.Provider, masterSecret uri.MasterSecret,
idFile clusterid.File,
serviceAccURI string, releases *Releases,
) error
}
type initializationClient struct {
log debugLog
installer installer
}
// NewInitializer creates a new client to install all Helm charts required for a constellation cluster.
func NewInitializer(log debugLog) (Initializer, error) {
installer, err := NewInstaller(constants.AdminConfFilename, log)
if err != nil {
return nil, fmt.Errorf("creating Helm installer: %w", err)
}
return &initializationClient{log: log, installer: installer}, nil
}
// Install installs all Helm charts required for a constellation cluster.
func (h initializationClient) Install(ctx context.Context, provider cloudprovider.Provider, masterSecret uri.MasterSecret,
idFile clusterid.File,
serviceAccURI string, releases *Releases,
) error {
tfClient, err := terraform.New(ctx, constants.TerraformWorkingDir)
if err != nil {
return fmt.Errorf("creating Terraform client: %w", err)
}
output, err := tfClient.ShowCluster(ctx, provider)
if err != nil {
return fmt.Errorf("getting Terraform output: %w", err)
}
ciliumVals := setupCiliumVals(provider, output)
if err := h.installer.InstallChartWithValues(ctx, releases.Cilium, ciliumVals); err != nil {
return fmt.Errorf("installing Cilium: %w", err)
}
h.log.Debugf("Waiting for Cilium to become ready")
helper, err := newK8sCiliumHelper(constants.AdminConfFilename)
if err != nil {
return fmt.Errorf("creating Kubernetes client: %w", err)
}
timeToStartWaiting := time.Now()
// TODO(3u13r): Reduce the timeout once we've switched the package repository - it is only this high because we once
// saw polling times of ~16 minutes when hitting a slow PoP from Fastly (GitHub's / ghcr.io CDN).
if err := helper.WaitForDS(ctx, "kube-system", "cilium", h.log); err != nil {
return fmt.Errorf("waiting for Cilium to become healthy: %w", err)
}
timeUntilFinishedWaiting := time.Since(timeToStartWaiting)
h.log.Debugf("Cilium became healthy after %s", timeUntilFinishedWaiting.String())
h.log.Debugf("Fix Cilium through restart")
if err := helper.RestartDS("kube-system", "cilium"); err != nil {
return fmt.Errorf("restarting Cilium: %w", err)
}
h.log.Debugf("Installing microservices")
serviceVals, err := setupMicroserviceVals(provider, masterSecret.Salt, idFile.UID, serviceAccURI, output)
if err != nil {
return fmt.Errorf("setting up microservice values: %w", err)
}
if err := h.installer.InstallChartWithValues(ctx, releases.ConstellationServices, serviceVals); err != nil {
return fmt.Errorf("installing microservices: %w", err)
}
h.log.Debugf("Installing cert-manager")
if err := h.installer.InstallChart(ctx, releases.CertManager); err != nil {
return fmt.Errorf("installing cert-manager: %w", err)
}
if releases.CSI != nil {
var csiVals map[string]any
if provider == cloudprovider.OpenStack {
creds, err := openstack.AccountKeyFromURI(serviceAccURI)
if err != nil {
return err
}
cinderIni := creds.CloudINI().CinderCSIConfiguration()
csiVals = map[string]any{
"cinder-config": map[string]any{
"secretData": cinderIni,
},
}
}
h.log.Debugf("Installing CSI deployments")
if err := h.installer.InstallChartWithValues(ctx, *releases.CSI, csiVals); err != nil {
return fmt.Errorf("installing CSI snapshot CRDs: %w", err)
}
}
if releases.AWSLoadBalancerController != nil {
h.log.Debugf("Installing AWS Load Balancer Controller")
if err := h.installer.InstallChart(ctx, *releases.AWSLoadBalancerController); err != nil {
return fmt.Errorf("installing AWS Load Balancer Controller: %w", err)
}
}
h.log.Debugf("Installing constellation operators")
operatorVals := setupOperatorVals(ctx, idFile.UID)
if err := h.installer.InstallChartWithValues(ctx, releases.ConstellationOperators, operatorVals); err != nil {
return fmt.Errorf("installing constellation operators: %w", err)
}
return nil
}
// installer is the interface for installing a single Helm chart.
type installer interface {
InstallChart(context.Context, Release) error
InstallChartWithValues(ctx context.Context, release Release, extraValues map[string]any) error
}
// TODO(malt3): switch over to DNS name on AWS and Azure as
// soon as every apiserver certificate of every control-plane node
// has the dns endpoint in its SAN list.
func setupCiliumVals(provider cloudprovider.Provider, output terraform.ApplyOutput) map[string]any {
vals := map[string]any{
"k8sServiceHost": output.IP,
"k8sServicePort": constants.KubernetesPort,
}
if provider == cloudprovider.GCP {
vals["ipv4NativeRoutingCIDR"] = output.GCP.IPCidrPod
vals["strictModeCIDR"] = output.GCP.IPCidrPod
}
return vals
}
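
For concreteness, a hypothetical GCP invocation: the IP and pod CIDR are made up, and the terraform.GCPApplyOutput type name is an assumption inferred from the output.GCP accesses above and the AzureApplyOutput type below.

// Hedged example; values and the GCP output type name are illustrative.
func exampleCiliumVals() map[string]any {
    // Expected result: k8sServiceHost "203.0.113.10", k8sServicePort constants.KubernetesPort,
    // and ipv4NativeRoutingCIDR/strictModeCIDR both "10.10.0.0/16".
    return setupCiliumVals(cloudprovider.GCP, terraform.ApplyOutput{
        IP:  "203.0.113.10",
        GCP: &terraform.GCPApplyOutput{IPCidrPod: "10.10.0.0/16"},
    })
}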
// setupMicroserviceVals returns the values for the microservice chart.
func setupMicroserviceVals(provider cloudprovider.Provider, measurementSalt []byte, uid, serviceAccURI string, output terraform.ApplyOutput) (map[string]any, error) {
extraVals := map[string]any{
"join-service": map[string]any{
"measurementSalt": base64.StdEncoding.EncodeToString(measurementSalt),
},
"verification-service": map[string]any{
"loadBalancerIP": output.IP,
},
"konnectivity": map[string]any{
"loadBalancerIP": output.IP,
},
}
switch provider {
case cloudprovider.GCP:
serviceAccountKey, err := gcpshared.ServiceAccountKeyFromURI(serviceAccURI)
if err != nil {
return nil, fmt.Errorf("getting service account key: %w", err)
}
rawKey, err := json.Marshal(serviceAccountKey)
if err != nil {
return nil, fmt.Errorf("marshaling service account key: %w", err)
}
if output.GCP == nil {
return nil, fmt.Errorf("no GCP output from Terraform")
}
extraVals["ccm"] = map[string]any{
"GCP": map[string]any{
"projectID": output.GCP.ProjectID,
"uid": uid,
"secretData": string(rawKey),
"subnetworkPodCIDR": output.GCP.IPCidrPod,
},
}
case cloudprovider.Azure:
if output.Azure == nil {
return nil, fmt.Errorf("no Azure output from Terraform")
}
ccmConfig, err := getCCMConfig(*output.Azure, serviceAccURI)
if err != nil {
return nil, fmt.Errorf("getting Azure CCM config: %w", err)
}
extraVals["ccm"] = map[string]any{
"Azure": map[string]any{
"azureConfig": string(ccmConfig),
},
}
}
return extraVals, nil
}
// setupOperatorVals returns the values for the constellation-operator chart.
func setupOperatorVals(_ context.Context, uid string) map[string]any {
return map[string]any{
"constellation-operator": map[string]any{
"constellationUID": uid,
},
}
}
type cloudConfig struct {
Cloud string `json:"cloud,omitempty"`
TenantID string `json:"tenantId,omitempty"`
SubscriptionID string `json:"subscriptionId,omitempty"`
ResourceGroup string `json:"resourceGroup,omitempty"`
Location string `json:"location,omitempty"`
SubnetName string `json:"subnetName,omitempty"`
SecurityGroupName string `json:"securityGroupName,omitempty"`
SecurityGroupResourceGroup string `json:"securityGroupResourceGroup,omitempty"`
LoadBalancerName string `json:"loadBalancerName,omitempty"`
LoadBalancerSku string `json:"loadBalancerSku,omitempty"`
VNetName string `json:"vnetName,omitempty"`
VNetResourceGroup string `json:"vnetResourceGroup,omitempty"`
CloudProviderBackoff bool `json:"cloudProviderBackoff,omitempty"`
UseInstanceMetadata bool `json:"useInstanceMetadata,omitempty"`
VMType string `json:"vmType,omitempty"`
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension,omitempty"`
UserAssignedIdentityID string `json:"userAssignedIdentityID,omitempty"`
}
// GetCCMConfig returns the configuration needed for the Kubernetes Cloud Controller Manager on Azure.
func getCCMConfig(tfOutput terraform.AzureApplyOutput, serviceAccURI string) ([]byte, error) {
creds, err := azureshared.ApplicationCredentialsFromURI(serviceAccURI)
if err != nil {
return nil, fmt.Errorf("getting service account key: %w", err)
}
useManagedIdentityExtension := creds.PreferredAuthMethod == azureshared.AuthMethodUserAssignedIdentity
config := cloudConfig{
Cloud: "AzurePublicCloud",
TenantID: creds.TenantID,
SubscriptionID: tfOutput.SubscriptionID,
ResourceGroup: tfOutput.ResourceGroup,
LoadBalancerSku: "standard",
SecurityGroupName: tfOutput.NetworkSecurityGroupName,
LoadBalancerName: tfOutput.LoadBalancerName,
UseInstanceMetadata: true,
VMType: "vmss",
Location: creds.Location,
UseManagedIdentityExtension: useManagedIdentityExtension,
UserAssignedIdentityID: tfOutput.UserAssignedIdentity,
}
return json.Marshal(config)
}
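
Stepping back, a hedged sketch of how a CLI command might drive this init path end to end (not part of the commit): the ChartLoader construction mirrors the loader test further down in this diff, and the masterSecret.Key field name is an assumption.

// Hedged driver sketch; assumes the config package is imported.
func installAllCharts(ctx context.Context, log debugLog, conf *config.Config,
    masterSecret uri.MasterSecret, idFile clusterid.File, serviceAccURI string,
) error {
    loader := ChartLoader{csp: conf.GetProvider()} // minimal construction, as in the loader test
    releases, err := loader.LoadReleases(conf, false, WaitModeAtomic, masterSecret.Key, masterSecret.Salt)
    if err != nil {
        return fmt.Errorf("loading releases: %w", err)
    }
    initializer, err := NewInitializer(log)
    if err != nil {
        return err
    }
    return initializer.Install(ctx, conf.GetProvider(), masterSecret, idFile, serviceAccURI, releases)
}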


@@ -0,0 +1,160 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package helm
import (
"bytes"
"context"
"fmt"
"strings"
"time"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/retry"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chart/loader"
"helm.sh/helm/v3/pkg/cli"
"k8s.io/apimachinery/pkg/util/wait"
)
const (
// timeout is the maximum time given to the helm Installer.
timeout = 10 * time.Minute
// maximumRetryAttempts is the maximum number of attempts to retry a helm install.
maximumRetryAttempts = 3
)
type debugLog interface {
Debugf(format string, args ...any)
Sync()
}
// Installer is a wrapper for a helm install action.
type Installer struct {
*action.Install
log debugLog
}
// NewInstaller creates a new Installer with the given logger.
func NewInstaller(kubeconfig string, logger debugLog) (*Installer, error) {
settings := cli.New()
settings.KubeConfig = kubeconfig
actionConfig := &action.Configuration{}
if err := actionConfig.Init(settings.RESTClientGetter(), constants.HelmNamespace,
"secret", logger.Debugf); err != nil {
return nil, err
}
action := action.NewInstall(actionConfig)
action.Namespace = constants.HelmNamespace
action.Timeout = timeout
return &Installer{
Install: action,
log: logger,
}, nil
}
// InstallChart is the generic install function for helm charts.
func (h *Installer) InstallChart(ctx context.Context, release Release) error {
return h.InstallChartWithValues(ctx, release, nil)
}
// InstallChartWithValues is the generic install function for helm charts with custom values.
func (h *Installer) InstallChartWithValues(ctx context.Context, release Release, extraValues map[string]any) error {
mergedVals := mergeMaps(release.Values, extraValues)
h.ReleaseName = release.ReleaseName
if err := h.SetWaitMode(release.WaitMode); err != nil {
return err
}
return h.install(ctx, release.Chart, mergedVals)
}
// install tries to install the given chart, waiting 30 seconds before retrying a failed attempt.
// After maximumRetryAttempts (3) retries, the retrier is canceled and the function returns with an error.
func (h *Installer) install(ctx context.Context, chartRaw []byte, values map[string]any) error {
var retries int
retriable := func(err error) bool {
// abort after maximumRetryAttempts tries.
if retries >= maximumRetryAttempts {
return false
}
retries++
// only retry if atomic is set
// otherwise helm doesn't uninstall
// the release on failure
if !h.Atomic {
return false
}
// check if error is retriable
return wait.Interrupted(err) ||
strings.Contains(err.Error(), "connection refused")
}
reader := bytes.NewReader(chartRaw)
chart, err := loader.LoadArchive(reader)
if err != nil {
return fmt.Errorf("helm load archive: %w", err)
}
doer := installDoer{
h,
chart,
values,
h.log,
}
retrier := retry.NewIntervalRetrier(doer, 30*time.Second, retriable)
retryLoopStartTime := time.Now()
if err := retrier.Do(ctx); err != nil {
return fmt.Errorf("helm install: %w", err)
}
retryLoopFinishDuration := time.Since(retryLoopStartTime)
h.log.Debugf("Helm chart %q installation finished after %s", chart.Name(), retryLoopFinishDuration)
return nil
}
// SetWaitMode sets the wait mode of the installer.
func (h *Installer) SetWaitMode(waitMode WaitMode) error {
switch waitMode {
case WaitModeNone:
h.Wait = false
h.Atomic = false
case WaitModeWait:
h.Wait = true
h.Atomic = false
case WaitModeAtomic:
h.Wait = true
h.Atomic = true
default:
return fmt.Errorf("unknown wait mode %q", waitMode)
}
return nil
}
// installDoer is a help struct to enable retrying helm's install action.
type installDoer struct {
Installer *Installer
chart *chart.Chart
values map[string]any
log debugLog
}
// Do logs which chart is installed and tries to install it.
func (i installDoer) Do(ctx context.Context) error {
i.log.Debugf("Trying to install Helm chart %s", i.chart.Name())
if _, err := i.Installer.RunWithContext(ctx, i.chart, i.values); err != nil {
i.log.Debugf("Helm chart installation % failed: %v", i.chart.Name(), err)
return err
}
return nil
}
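
And a brief sketch of the installer in isolation (not part of the commit): a Release would normally come from ChartLoader.LoadReleases, and the extra values map is illustrative.

// Hedged sketch: install a single release with extra values.
func installOne(ctx context.Context, log debugLog, release Release) error {
    inst, err := NewInstaller(constants.AdminConfFilename, log)
    if err != nil {
        return err
    }
    // Failed attempts are only retried in WaitModeAtomic, since helm only
    // cleans up a failed release on its own when Atomic is set.
    return inst.InstallChartWithValues(ctx, release, map[string]any{"someKey": "someValue"})
}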


@@ -27,7 +27,6 @@ import (
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/deploy/helm"
"github.com/edgelesssys/constellation/v2/internal/semver"
"github.com/edgelesssys/constellation/v2/internal/versions"
)
@@ -108,21 +107,8 @@ func NewLoader(csp cloudprovider.Provider, k8sVersion versions.ValidK8sVersion,
}
}
// Load the embedded helm charts.
func (i *ChartLoader) Load(config *config.Config, conformanceMode bool, helmWaitMode helm.WaitMode, masterSecret, salt []byte) ([]byte, error) {
releases, err := i.LoadReleases(config, conformanceMode, helmWaitMode, masterSecret, salt)
if err != nil {
return nil, fmt.Errorf("loading releases: %w", err)
}
rel, err := json.Marshal(releases)
if err != nil {
return nil, err
}
return rel, nil
}
// LoadReleases loads the embedded helm charts and returns them as a HelmReleases object.
func (i *ChartLoader) LoadReleases(config *config.Config, conformanceMode bool, helmWaitMode helm.WaitMode, masterSecret, salt []byte) (*helm.Releases, error) {
func (i *ChartLoader) LoadReleases(config *config.Config, conformanceMode bool, helmWaitMode WaitMode, masterSecret, salt []byte) (*Releases, error) {
ciliumRelease, err := i.loadRelease(ciliumInfo, helmWaitMode)
if err != nil {
return nil, fmt.Errorf("loading cilium: %w", err)
@@ -147,7 +133,7 @@ ...
return nil, fmt.Errorf("extending constellation-services values: %w", err)
}
releases := helm.Releases{Cilium: ciliumRelease, CertManager: certManagerRelease, ConstellationOperators: operatorRelease, ConstellationServices: conServicesRelease}
releases := Releases{Cilium: ciliumRelease, CertManager: certManagerRelease, ConstellationOperators: operatorRelease, ConstellationServices: conServicesRelease}
if config.HasProvider(cloudprovider.AWS) {
awsRelease, err := i.loadRelease(awsLBControllerInfo, helmWaitMode)
if err != nil {
@@ -168,10 +154,10 @@ ...
// loadRelease loads the embedded chart and values depending on the given info argument.
// IMPORTANT: .helmignore rules specifying files in subdirectories are not applied (e.g. crds/kustomization.yaml).
func (i *ChartLoader) loadRelease(info chartInfo, helmWaitMode helm.WaitMode) (helm.Release, error) {
func (i *ChartLoader) loadRelease(info chartInfo, helmWaitMode WaitMode) (Release, error) {
chart, err := loadChartsDir(helmFS, info.path)
if err != nil {
return helm.Release{}, fmt.Errorf("loading %s chart: %w", info.releaseName, err)
return Release{}, fmt.Errorf("loading %s chart: %w", info.releaseName, err)
}
var values map[string]any
@@ -181,7 +167,7 @@ func (i *ChartLoader) loadRelease(info chartInfo, helmWaitMode helm.WaitMode) (h
var ok bool
values, ok = ciliumVals[i.csp.String()]
if !ok {
return helm.Release{}, fmt.Errorf("cilium values for csp %q not found", i.csp.String())
return Release{}, fmt.Errorf("cilium values for csp %q not found", i.csp.String())
}
case certManagerInfo.releaseName:
values = i.loadCertManagerValues()
@@ -200,10 +186,10 @@ func (i *ChartLoader) loadRelease(info chartInfo, helmWaitMode helm.WaitMode) (h
chartRaw, err := i.marshalChart(chart)
if err != nil {
return helm.Release{}, fmt.Errorf("packaging %s chart: %w", info.releaseName, err)
return Release{}, fmt.Errorf("packaging %s chart: %w", info.releaseName, err)
}
return helm.Release{Chart: chartRaw, Values: values, ReleaseName: info.releaseName, WaitMode: helmWaitMode}, nil
return Release{Chart: chartRaw, Values: values, ReleaseName: info.releaseName, WaitMode: helmWaitMode}, nil
}
func (i *ChartLoader) loadAWSLBControllerValues() map[string]any {


@@ -8,7 +8,6 @@ package helm
import (
"bytes"
"encoding/json"
"fmt"
"io/fs"
"os"
@@ -29,21 +28,15 @@ import (
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/deploy/helm"
)
// TestLoadReleases checks that LoadReleases correctly preserves the dependencies of the loaded charts.
func TestLoad(t *testing.T) {
func TestLoadReleases(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
config := &config.Config{Provider: config.ProviderConfig{GCP: &config.GCPConfig{}}}
chartLoader := ChartLoader{csp: config.GetProvider()}
release, err := chartLoader.Load(config, true, helm.WaitModeAtomic, []byte("secret"), []byte("salt"))
require.NoError(err)
var helmReleases helm.Releases
err = json.Unmarshal(release, &helmReleases)
helmReleases, err := chartLoader.LoadReleases(config, true, WaitModeAtomic, []byte("secret"), []byte("salt"))
require.NoError(err)
reader := bytes.NewReader(helmReleases.ConstellationServices.Chart)
chart, err := loader.LoadArchive(reader)


@@ -0,0 +1,39 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
// Package helm provides types and functions shared across services.
package helm
// Release bundles all information necessary to create a helm release.
type Release struct {
Chart []byte
Values map[string]any
ReleaseName string
WaitMode WaitMode
}
// Releases bundles all helm releases to be deployed to Constellation.
type Releases struct {
AWSLoadBalancerController *Release
CSI *Release
Cilium Release
CertManager Release
ConstellationOperators Release
ConstellationServices Release
}
// WaitMode specifies the wait mode for a helm release.
type WaitMode string
const (
// WaitModeNone specifies that the helm release should not wait for the resources to be ready.
WaitModeNone WaitMode = ""
// WaitModeWait specifies that the helm release should wait for the resources to be ready.
WaitModeWait WaitMode = "wait"
// WaitModeAtomic specifies that the helm release should
// wait for the resources to be ready and roll back atomically on failure.
WaitModeAtomic WaitMode = "atomic"
)


@@ -1,134 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package helm
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"github.com/edgelesssys/constellation/v2/cli/internal/terraform"
"github.com/edgelesssys/constellation/v2/internal/cloud/azureshared"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/cloud/gcpshared"
"github.com/edgelesssys/constellation/v2/internal/constants"
)
// setupMicroserviceVals returns the values for the microservice chart.
func setupMicroserviceVals(ctx context.Context, provider cloudprovider.Provider, measurementSalt []byte, uid, serviceAccURI string) (map[string]any, error) {
tfClient, err := terraform.New(ctx, constants.TerraformWorkingDir)
if err != nil {
return nil, fmt.Errorf("creating Terraform client: %w", err)
}
output, err := tfClient.ShowCluster(ctx, provider)
if err != nil {
return nil, fmt.Errorf("getting Terraform output: %w", err)
}
extraVals := map[string]any{
"join-service": map[string]any{
"measurementSalt": base64.StdEncoding.EncodeToString(measurementSalt),
},
"verification-service": map[string]any{
"loadBalancerIP": output.IP,
},
"konnectivity": map[string]any{
"loadBalancerIP": output.IP,
},
}
switch provider {
case cloudprovider.GCP:
serviceAccountKey, err := gcpshared.ServiceAccountKeyFromURI(serviceAccURI)
if err != nil {
return nil, fmt.Errorf("getting service account key: %w", err)
}
rawKey, err := json.Marshal(serviceAccountKey)
if err != nil {
return nil, fmt.Errorf("marshaling service account key: %w", err)
}
if output.GCP == nil {
return nil, fmt.Errorf("no GCP output from Terraform")
}
extraVals["ccm"] = map[string]any{
"GCP": map[string]any{
"projectID": output.GCP.ProjectID,
"uid": uid,
"secretData": string(rawKey),
"subnetworkPodCIDR": output.GCP.IPCidrPod,
},
}
case cloudprovider.Azure:
if output.Azure == nil {
return nil, fmt.Errorf("no Azure output from Terraform")
}
ccmConfig, err := getCCMConfig(*output.Azure, serviceAccURI)
if err != nil {
return nil, fmt.Errorf("getting Azure CCM config: %w", err)
}
extraVals["ccm"] = map[string]any{
"Azure": map[string]any{
"azureConfig": string(ccmConfig),
},
}
}
return extraVals, nil
}
// setupOperatorVals returns the values for the constellation-operator chart.
func setupOperatorVals(_ context.Context, uid string) map[string]any {
return map[string]any{
"constellation-operator": map[string]any{
"constellationUID": uid,
},
}
}
type cloudConfig struct {
Cloud string `json:"cloud,omitempty"`
TenantID string `json:"tenantId,omitempty"`
SubscriptionID string `json:"subscriptionId,omitempty"`
ResourceGroup string `json:"resourceGroup,omitempty"`
Location string `json:"location,omitempty"`
SubnetName string `json:"subnetName,omitempty"`
SecurityGroupName string `json:"securityGroupName,omitempty"`
SecurityGroupResourceGroup string `json:"securityGroupResourceGroup,omitempty"`
LoadBalancerName string `json:"loadBalancerName,omitempty"`
LoadBalancerSku string `json:"loadBalancerSku,omitempty"`
VNetName string `json:"vnetName,omitempty"`
VNetResourceGroup string `json:"vnetResourceGroup,omitempty"`
CloudProviderBackoff bool `json:"cloudProviderBackoff,omitempty"`
UseInstanceMetadata bool `json:"useInstanceMetadata,omitempty"`
VMType string `json:"vmType,omitempty"`
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension,omitempty"`
UserAssignedIdentityID string `json:"userAssignedIdentityID,omitempty"`
}
// GetCCMConfig returns the configuration needed for the Kubernetes Cloud Controller Manager on Azure.
func getCCMConfig(tfOutput terraform.AzureApplyOutput, serviceAccURI string) ([]byte, error) {
creds, err := azureshared.ApplicationCredentialsFromURI(serviceAccURI)
if err != nil {
return nil, fmt.Errorf("getting service account key: %w", err)
}
useManagedIdentityExtension := creds.PreferredAuthMethod == azureshared.AuthMethodUserAssignedIdentity
config := cloudConfig{
Cloud: "AzurePublicCloud",
TenantID: creds.TenantID,
SubscriptionID: tfOutput.SubscriptionID,
ResourceGroup: tfOutput.ResourceGroup,
LoadBalancerSku: "standard",
SecurityGroupName: tfOutput.NetworkSecurityGroupName,
LoadBalancerName: tfOutput.LoadBalancerName,
UseInstanceMetadata: true,
VMType: "vmss",
Location: creds.Location,
UseManagedIdentityExtension: useManagedIdentityExtension,
UserAssignedIdentityID: tfOutput.UserAssignedIdentity,
}
return json.Marshal(config)
}


@@ -13,20 +13,20 @@ import (
"strings"
"time"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/cli"
"helm.sh/helm/v3/pkg/release"
"github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/compatibility"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/deploy/helm"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/semver"
"github.com/edgelesssys/constellation/v2/internal/versions"
"github.com/spf13/afero"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/cli"
"helm.sh/helm/v3/pkg/release"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -44,8 +44,8 @@ var ErrConfirmationMissing = errors.New("action requires user confirmation")
var errReleaseNotFound = errors.New("release not found")
// Client handles interaction with helm and the cluster.
type Client struct {
// UpgradeClient handles interaction with helm and the cluster.
type UpgradeClient struct {
config *action.Configuration
kubectl crdClient
fs file.Handler
@@ -53,8 +53,8 @@ type Client struct {
log debugLog
}
// NewClient returns a new initializes client for the namespace Client.
func NewClient(client crdClient, kubeConfigPath, helmNamespace string, log debugLog) (*Client, error) {
// NewUpgradeClient returns a newly initialized upgrade client for the given namespace.
func NewUpgradeClient(client crdClient, kubeConfigPath, helmNamespace string, log debugLog) (*UpgradeClient, error) {
settings := cli.New()
settings.KubeConfig = kubeConfigPath // constants.AdminConfFilename
@@ -74,10 +74,10 @@ func NewClient(client crdClient, kubeConfigPath, helmNamespace string, log debug
return nil, fmt.Errorf("initializing kubectl: %w", err)
}
return &Client{kubectl: client, fs: fileHandler, actions: actions{config: actionConfig}, log: log}, nil
return &UpgradeClient{kubectl: client, fs: fileHandler, actions: actions{config: actionConfig}, log: log}, nil
}
func (c *Client) shouldUpgrade(releaseName string, newVersion semver.Semver, force bool) error {
func (c *UpgradeClient) shouldUpgrade(releaseName string, newVersion semver.Semver, force bool) error {
currentVersion, err := c.currentVersion(releaseName)
if err != nil {
return fmt.Errorf("getting version for %s: %w", releaseName, err)
@@ -107,7 +107,7 @@ func (c *Client) shouldUpgrade(releaseName string, newVersion semver.Semver, for
// Upgrade runs a helm-upgrade on all deployments that are managed via Helm.
// If the CLI receives an interrupt signal it will cancel the context.
// Canceling the context will prompt helm to abort and roll back the ongoing upgrade.
func (c *Client) Upgrade(ctx context.Context, config *config.Config, idFile clusterid.File, timeout time.Duration, allowDestructive, force bool, upgradeID string) error {
func (c *UpgradeClient) Upgrade(ctx context.Context, config *config.Config, idFile clusterid.File, timeout time.Duration, allowDestructive, force bool, upgradeID string) error {
upgradeErrs := []error{}
upgradeReleases := []*chart.Chart{}
newReleases := []*chart.Chart{}
@@ -206,7 +206,7 @@ func getManagedCharts(config *config.Config) []chartInfo {
}
// Versions queries the cluster for running versions and returns a map of releaseName -> version.
func (c *Client) Versions() (ServiceVersions, error) {
func (c *UpgradeClient) Versions() (ServiceVersions, error) {
ciliumVersion, err := c.currentVersion(ciliumInfo.releaseName)
if err != nil {
return ServiceVersions{}, fmt.Errorf("getting %s version: %w", ciliumInfo.releaseName, err)
@@ -246,7 +246,7 @@ func (c *Client) Versions() (ServiceVersions, error) {
}
// currentVersion returns the version of the currently installed helm release.
func (c *Client) currentVersion(release string) (semver.Semver, error) {
func (c *UpgradeClient) currentVersion(release string) (semver.Semver, error) {
rel, err := c.actions.listAction(release)
if err != nil {
return semver.Semver{}, err
@@ -266,7 +266,7 @@ func (c *Client) currentVersion(release string) (semver.Semver, error) {
return semver.New(rel[0].Chart.Metadata.Version)
}
func (c *Client) csiVersions() (map[string]semver.Semver, error) {
func (c *UpgradeClient) csiVersions() (map[string]semver.Semver, error) {
packedChartRelease, err := c.actions.listAction(csiInfo.releaseName)
if err != nil {
return nil, fmt.Errorf("listing %s: %w", csiInfo.releaseName, err)
@@ -299,7 +299,7 @@ func (c *Client) csiVersions() (map[string]semver.Semver, error) {
}
// installNewRelease installs a previously not installed release on the cluster.
func (c *Client) installNewRelease(
func (c *UpgradeClient) installNewRelease(
ctx context.Context, timeout time.Duration, conf *config.Config, idFile clusterid.File, chart *chart.Chart,
) error {
releaseName, values, err := c.loadUpgradeValues(ctx, conf, idFile, chart)
@@ -310,7 +310,7 @@ func (c *Client) installNewRelease(
}
// upgradeRelease upgrades a release running on the cluster.
func (c *Client) upgradeRelease(
func (c *UpgradeClient) upgradeRelease(
ctx context.Context, timeout time.Duration, conf *config.Config, idFile clusterid.File, chart *chart.Chart,
) error {
releaseName, values, err := c.loadUpgradeValues(ctx, conf, idFile, chart)
@@ -327,7 +327,7 @@ func (c *Client) upgradeRelease(
}
// loadUpgradeValues loads values for a chart required for running an upgrade.
func (c *Client) loadUpgradeValues(ctx context.Context, conf *config.Config, idFile clusterid.File, chart *chart.Chart,
func (c *UpgradeClient) loadUpgradeValues(ctx context.Context, conf *config.Config, idFile clusterid.File, chart *chart.Chart,
) (string, map[string]any, error) {
// We need to load all values that can be statically loaded before merging them with the cluster
// values. Otherwise the templates are not rendered correctly.
@@ -385,7 +385,7 @@ func (c *Client) loadUpgradeValues(ctx context.Context, conf *config.Config, idF
// applyMigrations checks the from version and applies the necessary migrations.
// The function assumes the caller has verified that our version drift restriction is not violated.
// Currently, this is done during config validation.
func (c *Client) applyMigrations(ctx context.Context, releaseName string, values map[string]any, conf *config.Config) error {
func (c *UpgradeClient) applyMigrations(ctx context.Context, releaseName string, values map[string]any, conf *config.Config) error {
current, err := c.currentVersion(releaseName)
if err != nil {
return fmt.Errorf("getting %s version: %w", releaseName, err)
@@ -413,7 +413,7 @@ func migrateFrom2_8(_ context.Context, _ map[string]any, _ *config.Config, _ crd
// and merging the fetched values with the locally found values.
// This is done to ensure that new values (from upgrades of the local files) end up in the cluster.
// reuse-values does not ensure this.
func (c *Client) mergeClusterValues(localValues map[string]any, releaseName string) (map[string]any, error) {
func (c *UpgradeClient) mergeClusterValues(localValues map[string]any, releaseName string) (map[string]any, error) {
// Ensure installCRDs is set for cert-manager chart.
if releaseName == certManagerInfo.releaseName {
localValues["installCRDs"] = true
@@ -423,11 +423,11 @@ func (c *Client) mergeClusterValues(localValues map[string]any, releaseName stri
return nil, fmt.Errorf("getting values for %s: %w", releaseName, err)
}
return helm.MergeMaps(clusterValues, localValues), nil
return mergeMaps(clusterValues, localValues), nil
}
// GetValues queries the cluster for the values of the given release.
func (c *Client) GetValues(release string) (map[string]any, error) {
func (c *UpgradeClient) GetValues(release string) (map[string]any, error) {
client := action.NewGetValues(c.config)
// Version corresponds to the release's revision. Specifying a Version <= 0 yields the latest release.
client.Version = 0
@@ -441,7 +441,7 @@ func (c *Client) GetValues(release string) (map[string]any, error) {
// updateCRDs walks through the dependencies of the given chart and applies
// the files in the dependency's 'crds' folder.
// This function is NOT recursive!
func (c *Client) updateCRDs(ctx context.Context, chart *chart.Chart) error {
func (c *UpgradeClient) updateCRDs(ctx context.Context, chart *chart.Chart) error {
for _, dep := range chart.Dependencies() {
for _, crdFile := range dep.Files {
if strings.HasPrefix(crdFile.Name, "crds/") {
@@ -456,11 +456,6 @@ func (c *Client) updateCRDs(ctx context.Context, chart *chart.Chart) error {
return nil
}
type debugLog interface {
Debugf(format string, args ...any)
Sync()
}
type crdClient interface {
Initialize(kubeconfig []byte) error
ApplyCRD(ctx context.Context, rawCRD []byte) error


@@ -46,7 +46,7 @@ func TestShouldUpgrade(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
client := Client{kubectl: nil, actions: &stubActionWrapper{version: tc.version}, log: logger.NewTest(t)}
client := UpgradeClient{kubectl: nil, actions: &stubActionWrapper{version: tc.version}, log: logger.NewTest(t)}
chart, err := loadChartsDir(helmFS, certManagerInfo.path)
require.NoError(err)
@@ -77,7 +77,7 @@ func TestUpgradeRelease(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
client := Client{kubectl: nil, actions: &stubActionWrapper{version: tc.version}, log: logger.NewTest(t)}
client := UpgradeClient{kubectl: nil, actions: &stubActionWrapper{version: tc.version}, log: logger.NewTest(t)}
chart, err := loadChartsDir(helmFS, certManagerInfo.path)
require.NoError(err)