Mirror of https://github.com/edgelesssys/constellation.git, synced 2025-03-26 00:28:20 -04:00
aws: use new LB controller to fix SecurityGroup cleanup on K8s service deletion (#2090)
* add current chart add current helm chart
* disable service controller for aws ccm
* add new iam roles
* doc AWS internet LB + add to LB test
* pass clusterName to helm for AWS LB
* fix update-aws-lb chart to also include .helmignore
* move chart outside services
* working state
* add subnet tags for AWS subnet discovery
* fix .helmignore load rule with file in subdirectory
* upgrade iam profile
* revert new loader impl since cilium is not correctly loaded
* install chart if not already present during `upgrade apply`
* cleanup PR + fix build + add todos cleanup PR + add todos
* shared helm pkg for cli install and bootstrapper
* add link to eks docs
* refactor iamMigrationCmd
* delete unused helm.symwallk
* move iammigrate to upgrade pkg
* fixup! delete unused helm.symwallk
* add to upgradecheck
* remove nodeSelector from go code (Otto)
* update iam docs and sort permission + remove duplicate roles
* fix bug in `upgrade check`
* better upgrade check output when svc version upgrade not possible
* pr feedback
* remove force flag in upgrade_test
* use upgrader.GetUpgradeID instead of extra type
* remove todos + fix check
* update doc lb (leo)
* remove bootstrapper helm package
* Update cli/internal/cmd/upgradecheck.go
  Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com>
* final nits
* add docs for e2e upgrade test setup
* Apply suggestions from code review
  Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com>
* Update cli/internal/helm/loader.go
  Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com>
* Update cli/internal/cmd/tfmigrationclient.go
  Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com>
* fix daniel review
* link to the iam permissions instead of manually updating them (agreed with leo)
* disable iam upgrade in upgrade apply

---------
Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com>
Co-authored-by: Malte Poll
This commit is contained in:
parent 8da6a23aa5
commit a87b7894db
.github/actions/e2e_lb/lb.yml (vendored, 2 changes)
@@ -3,6 +3,8 @@ kind: Service
 metadata:
   name: whoami
   namespace: lb-test
+  annotations:
+    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
 spec:
   selector:
     app: whoami
@@ -13,7 +13,6 @@ go_library(
     deps = [
         "//bootstrapper/internal/clean",
         "//bootstrapper/internal/diskencryption",
-        "//bootstrapper/internal/helm",
         "//bootstrapper/internal/initserver",
         "//bootstrapper/internal/joinclient",
         "//bootstrapper/internal/kubernetes",
@@ -36,6 +35,7 @@ go_library(
         "//internal/cloud/openstack",
         "//internal/cloud/qemu",
         "//internal/constants",
+        "//internal/deploy/helm",
         "//internal/file",
         "//internal/grpc/dialer",
         "//internal/kubernetes/kubectl",
@@ -16,7 +16,6 @@ import (
 	"github.com/spf13/afero"
 	"go.uber.org/zap"

-	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/helm"
 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes"
 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/k8sapi"
 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/kubewaiter"
@@ -33,6 +32,7 @@ import (
 	openstackcloud "github.com/edgelesssys/constellation/v2/internal/cloud/openstack"
 	qemucloud "github.com/edgelesssys/constellation/v2/internal/cloud/qemu"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
+	"github.com/edgelesssys/constellation/v2/internal/deploy/helm"
 	"github.com/edgelesssys/constellation/v2/internal/file"
 	"github.com/edgelesssys/constellation/v2/internal/kubernetes/kubectl"
 	"github.com/edgelesssys/constellation/v2/internal/logger"
@@ -67,7 +67,7 @@ func main() {
 	var openDevice vtpm.TPMOpenFunc
 	var fs afero.Fs

-	helmClient, err := helm.New(log)
+	helmClient, err := helm.NewInstaller(log, constants.ControlPlaneAdminConfFilename)
 	if err != nil {
 		log.With(zap.Error(err)).Fatalf("Helm client could not be initialized")
 	}
@@ -1,29 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("//bazel/go:go_test.bzl", "go_test")

go_library(
    name = "helm",
    srcs = ["helm.go"],
    importpath = "github.com/edgelesssys/constellation/v2/bootstrapper/internal/helm",
    visibility = ["//bootstrapper:__subpackages__"],
    deps = [
        "//bootstrapper/internal/kubernetes/k8sapi",
        "//internal/constants",
        "//internal/deploy/helm",
        "//internal/logger",
        "//internal/retry",
        "@io_k8s_api//core/v1:core",
        "@io_k8s_apimachinery//pkg/util/wait",
        "@org_uber_go_zap//:zap",
        "@sh_helm_helm_v3//pkg/action",
        "@sh_helm_helm_v3//pkg/chart",
        "@sh_helm_helm_v3//pkg/chart/loader",
        "@sh_helm_helm_v3//pkg/cli",
    ],
)

go_test(
    name = "helm_test",
    srcs = ["helm_test.go"],
    embed = [":helm"],
)
@@ -1,225 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

// Package helm is used to install Constellation microservices and other services during cluster initialization.
package helm

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"os/exec"
	"strings"
	"time"

	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/k8sapi"
	"github.com/edgelesssys/constellation/v2/internal/constants"
	"github.com/edgelesssys/constellation/v2/internal/deploy/helm"
	"github.com/edgelesssys/constellation/v2/internal/logger"
	"github.com/edgelesssys/constellation/v2/internal/retry"
	"go.uber.org/zap"
	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/chart"
	"helm.sh/helm/v3/pkg/chart/loader"
	"helm.sh/helm/v3/pkg/cli"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/wait"
)

const (
	// timeout is the maximum time given to the helm client.
	timeout = 10 * time.Minute
	// maximumRetryAttempts is the maximum number of attempts to retry a helm install.
	maximumRetryAttempts = 3
)

// Client is used to install microservice during cluster initialization. It is a wrapper for a helm install action.
type Client struct {
	*action.Install
	log *logger.Logger
}

// New creates a new client with the given logger.
func New(log *logger.Logger) (*Client, error) {
	settings := cli.New()
	settings.KubeConfig = constants.ControlPlaneAdminConfFilename

	actionConfig := &action.Configuration{}
	if err := actionConfig.Init(settings.RESTClientGetter(), constants.HelmNamespace,
		"secret", log.Infof); err != nil {
		return nil, err
	}

	action := action.NewInstall(actionConfig)
	action.Namespace = constants.HelmNamespace
	action.Timeout = timeout

	return &Client{
		action,
		log,
	}, nil
}

// InstallChart installs a helm chart, optionally merging extraVals into the values of the chart.
func (h *Client) InstallChart(ctx context.Context, release helm.Release, extraVals map[string]any) error {
	h.ReleaseName = release.ReleaseName
	if err := h.setWaitMode(release.WaitMode); err != nil {
		return err
	}

	mergedVals := helm.MergeMaps(release.Values, extraVals)

	return h.install(ctx, release.Chart, mergedVals)
}

// InstallCilium sets up the cilium pod network.
func (h *Client) InstallCilium(ctx context.Context, kubectl k8sapi.Client, release helm.Release, in k8sapi.SetupPodNetworkInput) error {
	h.ReleaseName = release.ReleaseName
	if err := h.setWaitMode(release.WaitMode); err != nil {
		return err
	}

	timeoutS := int64(10)
	// allow coredns to run on uninitialized nodes (required by cloud-controller-manager)
	tolerations := []corev1.Toleration{
		{
			Key:    "node.cloudprovider.kubernetes.io/uninitialized",
			Value:  "true",
			Effect: corev1.TaintEffectNoSchedule,
		},
		{
			Key:               "node.kubernetes.io/unreachable",
			Operator:          corev1.TolerationOpExists,
			Effect:            corev1.TaintEffectNoExecute,
			TolerationSeconds: &timeoutS,
		},
	}
	if err := kubectl.AddTolerationsToDeployment(ctx, tolerations, "coredns", "kube-system"); err != nil {
		return fmt.Errorf("failed to add tolerations to coredns deployment: %w", err)
	}
	if err := kubectl.EnforceCoreDNSSpread(ctx); err != nil {
		return fmt.Errorf("failed to enforce CoreDNS spread: %w", err)
	}

	switch in.CloudProvider {
	case "aws", "azure", "openstack", "qemu":
		return h.installCiliumGeneric(ctx, release, in.LoadBalancerHost, in.LoadBalancerPort)
	case "gcp":
		return h.installCiliumGCP(ctx, release, in.NodeName, in.FirstNodePodCIDR, in.SubnetworkPodCIDR, in.LoadBalancerHost, in.LoadBalancerPort)
	default:
		return fmt.Errorf("unsupported cloud provider %q", in.CloudProvider)
	}
}

// installCiliumGeneric installs cilium with the given load balancer endpoint.
// This is used for cloud providers that do not require special server-side configuration.
// Currently this is AWS, Azure, and QEMU.
func (h *Client) installCiliumGeneric(ctx context.Context, release helm.Release, kubeAPIHost, kubeAPIPort string) error {
	release.Values["k8sServiceHost"] = kubeAPIHost
	release.Values["k8sServicePort"] = kubeAPIPort

	return h.install(ctx, release.Chart, release.Values)
}

func (h *Client) installCiliumGCP(ctx context.Context, release helm.Release, nodeName, nodePodCIDR, subnetworkPodCIDR, kubeAPIHost, kubeAPIPort string) error {
	out, err := exec.CommandContext(ctx, constants.KubectlPath, "--kubeconfig", constants.ControlPlaneAdminConfFilename, "patch", "node", nodeName, "-p", "{\"spec\":{\"podCIDR\": \""+nodePodCIDR+"\"}}").CombinedOutput()
	if err != nil {
		err = errors.New(string(out))
		return err
	}

	// configure pod network CIDR
	release.Values["ipv4NativeRoutingCIDR"] = subnetworkPodCIDR
	release.Values["strictModeCIDR"] = subnetworkPodCIDR
	release.Values["k8sServiceHost"] = kubeAPIHost
	release.Values["k8sServicePort"] = kubeAPIPort

	return h.install(ctx, release.Chart, release.Values)
}

// install tries to install the given chart and aborts after ~5 tries.
// The function will wait 30 seconds before retrying a failed installation attempt.
// After 3 tries, the retrier will be canceled and the function returns with an error.
func (h *Client) install(ctx context.Context, chartRaw []byte, values map[string]any) error {
	var retries int
	retriable := func(err error) bool {
		// abort after maximumRetryAttempts tries.
		if retries >= maximumRetryAttempts {
			return false
		}
		retries++
		// only retry if atomic is set
		// otherwise helm doesn't uninstall
		// the release on failure
		if !h.Atomic {
			return false
		}
		// check if error is retriable
		return wait.Interrupted(err) ||
			strings.Contains(err.Error(), "connection refused")
	}

	reader := bytes.NewReader(chartRaw)
	chart, err := loader.LoadArchive(reader)
	if err != nil {
		return fmt.Errorf("helm load archive: %w", err)
	}

	doer := installDoer{
		h,
		chart,
		values,
		h.log,
	}
	retrier := retry.NewIntervalRetrier(doer, 30*time.Second, retriable)

	retryLoopStartTime := time.Now()
	if err := retrier.Do(ctx); err != nil {
		return fmt.Errorf("helm install: %w", err)
	}
	retryLoopFinishDuration := time.Since(retryLoopStartTime)
	h.log.With(zap.String("chart", chart.Name()), zap.Duration("duration", retryLoopFinishDuration)).Infof("Helm chart installation finished")

	return nil
}

func (h *Client) setWaitMode(waitMode helm.WaitMode) error {
	switch waitMode {
	case helm.WaitModeNone:
		h.Wait = false
		h.Atomic = false
	case helm.WaitModeWait:
		h.Wait = true
		h.Atomic = false
	case helm.WaitModeAtomic:
		h.Wait = true
		h.Atomic = true
	default:
		return fmt.Errorf("unknown wait mode %q", waitMode)
	}
	return nil
}

// installDoer is a help struct to enable retrying helm's install action.
type installDoer struct {
	client *Client
	chart  *chart.Chart
	values map[string]any
	log    *logger.Logger
}

// Do logs which chart is installed and tries to install it.
func (i installDoer) Do(ctx context.Context) error {
	i.log.With(zap.String("chart", i.chart.Name())).Infof("Trying to install Helm chart")

	if _, err := i.client.RunWithContext(ctx, i.chart, i.values); err != nil {
		i.log.With(zap.Error(err), zap.String("chart", i.chart.Name())).Errorf("Helm chart installation failed")
		return err
	}

	return nil
}
@@ -1,7 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package helm
@@ -4,6 +4,7 @@ load("//bazel/go:go_test.bzl", "go_test")
 go_library(
     name = "kubernetes",
     srcs = [
+        "ciliuminstallation.go",
         "cloud_provider.go",
         "k8sutil.go",
         "kubernetes.go",
bootstrapper/internal/kubernetes/ciliuminstallation.go (new file, 81 lines)
@@ -0,0 +1,81 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

// Package kubernetes provides functionality to bootstrap a Kubernetes cluster, or join an exiting one.
package kubernetes

import (
	"context"
	"errors"
	"fmt"
	"os/exec"

	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/k8sapi"
	"github.com/edgelesssys/constellation/v2/internal/constants"
	"github.com/edgelesssys/constellation/v2/internal/deploy/helm"
	corev1 "k8s.io/api/core/v1"
)

// installCilium sets up the cilium pod network.
func installCilium(ctx context.Context, helmInstaller helmClient, kubectl k8sapi.Client, release helm.Release, in k8sapi.SetupPodNetworkInput) error {
	timeoutS := int64(10)
	// allow coredns to run on uninitialized nodes (required by cloud-controller-manager)
	tolerations := []corev1.Toleration{
		{
			Key:    "node.cloudprovider.kubernetes.io/uninitialized",
			Value:  "true",
			Effect: corev1.TaintEffectNoSchedule,
		},
		{
			Key:               "node.kubernetes.io/unreachable",
			Operator:          corev1.TolerationOpExists,
			Effect:            corev1.TaintEffectNoExecute,
			TolerationSeconds: &timeoutS,
		},
	}
	if err := kubectl.AddTolerationsToDeployment(ctx, tolerations, "coredns", "kube-system"); err != nil {
		return fmt.Errorf("failed to add tolerations to coredns deployment: %w", err)
	}
	if err := kubectl.EnforceCoreDNSSpread(ctx); err != nil {
		return fmt.Errorf("failed to enforce CoreDNS spread: %w", err)
	}

	switch in.CloudProvider {
	case "aws", "azure", "openstack", "qemu":
		return installCiliumGeneric(ctx, helmInstaller, release, in.LoadBalancerHost, in.LoadBalancerPort)
	case "gcp":
		return installCiliumGCP(ctx, helmInstaller, release, in.NodeName, in.FirstNodePodCIDR, in.SubnetworkPodCIDR, in.LoadBalancerHost, in.LoadBalancerPort)
	default:
		return fmt.Errorf("unsupported cloud provider %q", in.CloudProvider)
	}
}

// installCiliumGeneric installs cilium with the given load balancer endpoint.
// This is used for cloud providers that do not require special server-side configuration.
// Currently this is AWS, Azure, and QEMU.
func installCiliumGeneric(ctx context.Context, helmInstaller helmClient, release helm.Release, kubeAPIHost, kubeAPIPort string) error {
	if release.Values != nil {
		release.Values["k8sServiceHost"] = kubeAPIHost
		release.Values["k8sServicePort"] = kubeAPIPort
	}
	return helmInstaller.InstallChart(ctx, release)
}

func installCiliumGCP(ctx context.Context, helmInstaller helmClient, release helm.Release, nodeName, nodePodCIDR, subnetworkPodCIDR, kubeAPIHost, kubeAPIPort string) error {
	out, err := exec.CommandContext(ctx, constants.KubectlPath, "--kubeconfig", constants.ControlPlaneAdminConfFilename, "patch", "node", nodeName, "-p", "{\"spec\":{\"podCIDR\": \""+nodePodCIDR+"\"}}").CombinedOutput()
	if err != nil {
		err = errors.New(string(out))
		return err
	}

	// configure pod network CIDR
	release.Values["ipv4NativeRoutingCIDR"] = subnetworkPodCIDR
	release.Values["strictModeCIDR"] = subnetworkPodCIDR
	release.Values["k8sServiceHost"] = kubeAPIHost
	release.Values["k8sServicePort"] = kubeAPIPort

	return helmInstaller.InstallChart(ctx, release)
}
@@ -10,7 +10,6 @@ import (
 	"context"
 	"net"

-	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/k8sapi"
 	"github.com/edgelesssys/constellation/v2/internal/deploy/helm"
 	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/role"
@@ -29,6 +28,6 @@ type clusterUtil interface {
 // helmClient bundles functions related to microservice deployment.
 // Only microservices that can be deployed purely via Helm are deployed with this interface.
 type helmClient interface {
-	InstallCilium(context.Context, k8sapi.Client, helm.Release, k8sapi.SetupPodNetworkInput) error
-	InstallChart(ctx context.Context, release helm.Release, extraVals map[string]any) error
+	InstallChart(context.Context, helm.Release) error
+	InstallChartWithValues(ctx context.Context, release helm.Release, extraValues map[string]any) error
 }
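Splitting the interface into InstallChart (for releases that already carry their values) and InstallChartWithValues (for releases that need extra values computed at runtime) also lets installCilium above be exercised with a small fake instead of a real Helm client. A minimal sketch under that assumption; the recordingHelmClient type is illustrative and not part of this commit:

```go
package kubernetes

import (
	"context"

	"github.com/edgelesssys/constellation/v2/internal/deploy/helm"
)

// recordingHelmClient is a hypothetical test double for the helmClient
// interface; it records release names instead of talking to Helm.
type recordingHelmClient struct {
	installed []string
}

func (r *recordingHelmClient) InstallChart(_ context.Context, release helm.Release) error {
	r.installed = append(r.installed, release.ReleaseName)
	return nil
}

func (r *recordingHelmClient) InstallChartWithValues(_ context.Context, release helm.Release, _ map[string]any) error {
	r.installed = append(r.installed, release.ReleaseName)
	return nil
}
```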
@@ -18,11 +18,6 @@ import (
 	"strings"
 	"time"

-	"go.uber.org/zap"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
-
 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/k8sapi"
 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/kubewaiter"
 	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
@@ -34,6 +29,10 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/role"
 	"github.com/edgelesssys/constellation/v2/internal/versions/components"
+	"go.uber.org/zap"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 )

 var validHostnameRegex = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
@@ -193,7 +192,7 @@ func (k *KubeWrapper) InitCluster(
 	}

 	log.Infof("Installing Cilium")
-	if err = k.helmClient.InstallCilium(ctx, k.client, helmReleases.Cilium, setupPodNetworkInput); err != nil {
+	if err = installCilium(ctx, k.helmClient, k.client, helmReleases.Cilium, setupPodNetworkInput); err != nil {
 		return nil, fmt.Errorf("installing pod network: %w", err)
 	}

@@ -221,7 +220,7 @@ func (k *KubeWrapper) InitCluster(
 		cloudServiceAccountURI: cloudServiceAccountURI,
 		loadBalancerIP:         controlPlaneHost,
 	}
-	extraVals, err := k.setupExtraVals(ctx, serviceConfig)
+	constellationVals, err := k.setupExtraVals(ctx, serviceConfig)
 	if err != nil {
 		return nil, fmt.Errorf("setting up extraVals: %w", err)
 	}
@@ -232,14 +231,14 @@ func (k *KubeWrapper) InitCluster(
 	}

 	log.Infof("Installing Constellation microservices")
-	if err = k.helmClient.InstallChart(ctx, helmReleases.ConstellationServices, extraVals); err != nil {
+	if err = k.helmClient.InstallChartWithValues(ctx, helmReleases.ConstellationServices, constellationVals); err != nil {
 		return nil, fmt.Errorf("installing constellation-services: %w", err)
 	}

 	// cert-manager provides CRDs used by other deployments,
 	// so it should be installed as early as possible, but after the services cert-manager depends on.
 	log.Infof("Installing cert-manager")
-	if err = k.helmClient.InstallChart(ctx, helmReleases.CertManager, nil); err != nil {
+	if err = k.helmClient.InstallChart(ctx, helmReleases.CertManager); err != nil {
 		return nil, fmt.Errorf("installing cert-manager: %w", err)
 	}

@@ -260,11 +259,18 @@ func (k *KubeWrapper) InitCluster(
 		}

 		log.Infof("Installing CSI deployments")
-		if err := k.helmClient.InstallChart(ctx, *helmReleases.CSI, csiVals); err != nil {
+		if err := k.helmClient.InstallChartWithValues(ctx, *helmReleases.CSI, csiVals); err != nil {
 			return nil, fmt.Errorf("installing CSI snapshot CRDs: %w", err)
 		}
 	}

+	if helmReleases.AWSLoadBalancerController != nil {
+		log.Infof("Installing AWS Load Balancer Controller")
+		if err = k.helmClient.InstallChart(ctx, *helmReleases.AWSLoadBalancerController); err != nil {
+			return nil, fmt.Errorf("installing AWS Load Balancer Controller: %w", err)
+		}
+	}
+
 	operatorVals, err := k.setupOperatorVals(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("setting up operator vals: %w", err)
@@ -273,7 +279,7 @@ func (k *KubeWrapper) InitCluster(
 	// Constellation operators require CRDs from cert-manager.
 	// They must be installed after it.
 	log.Infof("Installing operators")
-	if err = k.helmClient.InstallChart(ctx, helmReleases.Operators, operatorVals); err != nil {
+	if err = k.helmClient.InstallChartWithValues(ctx, helmReleases.ConstellationOperators, operatorVals); err != nil {
 		return nil, fmt.Errorf("installing operators: %w", err)
 	}
@@ -182,6 +182,7 @@ func TestInitCluster(t *testing.T) {
 			require := require.New(t)

 			kube := KubeWrapper{
+				cloudProvider:    "aws", // provide a valid cloud provider for cilium installation
 				clusterUtil:      &tc.clusterUtil,
 				helmClient:       &tc.helmClient,
 				providerMetadata: tc.providerMetadata,
@@ -566,8 +567,17 @@ func (s *stubHelmClient) InstallCilium(_ context.Context, _ k8sapi.Client, _ hel
 	return s.ciliumError
 }

-func (s *stubHelmClient) InstallChart(_ context.Context, _ helm.Release, _ map[string]any) error {
-	return s.installChartError
+func (s *stubHelmClient) InstallChart(ctx context.Context, release helm.Release) error {
+	return s.InstallChartWithValues(ctx, release, release.Values)
+}
+
+func (s *stubHelmClient) InstallChartWithValues(_ context.Context, release helm.Release, _ map[string]any) error {
+	switch release.ReleaseName {
+	case "cilium":
+		return s.ciliumError
+	default:
+		return s.installChartError
+	}
 }

 type stubKubeAPIWaiter struct {
@@ -26,8 +26,8 @@ type imageFetcher interface {

 type terraformClient interface {
 	PrepareWorkspace(path string, input terraform.Variables) error
+	ApplyIAMConfig(ctx context.Context, provider cloudprovider.Provider, logLevel terraform.LogLevel) (terraform.IAMOutput, error)
 	CreateCluster(ctx context.Context, logLevel terraform.LogLevel) (terraform.ApplyOutput, error)
-	CreateIAMConfig(ctx context.Context, provider cloudprovider.Provider, logLevel terraform.LogLevel) (terraform.IAMOutput, error)
 	Destroy(ctx context.Context, logLevel terraform.LogLevel) error
 	CleanUpWorkspace() error
 	RemoveInstaller()
@@ -54,7 +54,7 @@ func (c *stubTerraformClient) CreateCluster(_ context.Context, _ terraform.LogLe
 	}, c.createClusterErr
 }

-func (c *stubTerraformClient) CreateIAMConfig(_ context.Context, _ cloudprovider.Provider, _ terraform.LogLevel) (terraform.IAMOutput, error) {
+func (c *stubTerraformClient) ApplyIAMConfig(_ context.Context, _ cloudprovider.Provider, _ terraform.LogLevel) (terraform.IAMOutput, error) {
 	return c.iamOutput, c.iamOutputErr
 }
@@ -166,7 +166,7 @@ func (c *IAMCreator) createGCP(ctx context.Context, cl terraformClient, opts *IA
 		return iamid.File{}, err
 	}

-	iamOutput, err := cl.CreateIAMConfig(ctx, cloudprovider.GCP, opts.TFLogLevel)
+	iamOutput, err := cl.ApplyIAMConfig(ctx, cloudprovider.GCP, opts.TFLogLevel)
 	if err != nil {
 		return iamid.File{}, err
 	}
@@ -193,7 +193,7 @@ func (c *IAMCreator) createAzure(ctx context.Context, cl terraformClient, opts *
 		return iamid.File{}, err
 	}

-	iamOutput, err := cl.CreateIAMConfig(ctx, cloudprovider.Azure, opts.TFLogLevel)
+	iamOutput, err := cl.ApplyIAMConfig(ctx, cloudprovider.Azure, opts.TFLogLevel)
 	if err != nil {
 		return iamid.File{}, err
 	}
@@ -221,7 +221,7 @@ func (c *IAMCreator) createAWS(ctx context.Context, cl terraformClient, opts *IA
 		return iamid.File{}, err
 	}

-	iamOutput, err := cl.CreateIAMConfig(ctx, cloudprovider.AWS, opts.TFLogLevel)
+	iamOutput, err := cl.ApplyIAMConfig(ctx, cloudprovider.AWS, opts.TFLogLevel)
 	if err != nil {
 		return iamid.File{}, err
 	}
@@ -6,7 +6,10 @@ go_library(
     srcs = ["id.go"],
     importpath = "github.com/edgelesssys/constellation/v2/cli/internal/clusterid",
     visibility = ["//cli:__subpackages__"],
-    deps = ["//internal/cloud/cloudprovider"],
+    deps = [
+        "//internal/cloud/cloudprovider",
+        "//internal/config",
+    ],
 )

 go_test(
@@ -8,6 +8,7 @@ package clusterid

 import (
 	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
+	"github.com/edgelesssys/constellation/v2/internal/config"
 )

 // File contains state information about a cluster.
@@ -68,3 +69,8 @@ func (f *File) Merge(other File) *File {

 	return f
 }
+
+// GetClusterName returns the name of the cluster.
+func GetClusterName(cfg *config.Config, idFile File) string {
+	return cfg.Name + "-" + idFile.UID
+}
@@ -27,6 +27,7 @@ go_library(
         "spinner.go",
         "status.go",
         "terminate.go",
+        "tfmigrationclient.go",
         "upgrade.go",
         "upgradeapply.go",
         "upgradecheck.go",
@@ -174,7 +174,11 @@ func (i *initCmd) initialize(cmd *cobra.Command, newDialer func(validator atls.V
 	if err != nil {
 		return fmt.Errorf("parsing or generating master secret from file %s: %w", flags.masterSecretPath, err)
 	}
-	helmLoader := helm.NewLoader(provider, k8sVersion)
+
+	clusterName := clusterid.GetClusterName(conf, idFile)
+	i.log.Debugf("Setting cluster name to %s", clusterName)
+
+	helmLoader := helm.NewLoader(provider, k8sVersion, clusterName)
 	i.log.Debugf("Created new Helm loader")
 	helmDeployments, err := helmLoader.Load(conf, flags.conformance, flags.helmWaitMode, masterSecret.Key, masterSecret.Salt)
 	i.log.Debugf("Loaded Helm deployments")
@@ -182,9 +186,6 @@ func (i *initCmd) initialize(cmd *cobra.Command, newDialer func(validator atls.V
 		return fmt.Errorf("loading Helm charts: %w", err)
 	}

-	clusterName := conf.Name + "-" + idFile.UID
-	i.log.Debugf("Setting cluster name to %s", clusterName)
-
 	cmd.PrintErrln("Note: If you just created the cluster, it can take a few minutes to connect.")
 	i.spinner.Start("Connecting ", false)
 	req := &initproto.InitRequest{
cli/internal/cmd/tfmigrationclient.go (new file, 68 lines)
@@ -0,0 +1,68 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package cmd

import (
	"fmt"

	"github.com/edgelesssys/constellation/v2/cli/internal/upgrade"
	"github.com/edgelesssys/constellation/v2/internal/file"
	"github.com/spf13/cobra"
)

// tfMigrationClient is a client for planning and applying Terraform migrations.
type tfMigrationClient struct {
	log debugLog
}

// planMigration checks for Terraform migrations and asks for confirmation if there are any. The user input is returned as confirmedDiff.
// adapted from migrateTerraform().
func (u *tfMigrationClient) planMigration(cmd *cobra.Command, file file.Handler, migrateCmd upgrade.TfMigrationCmd) (hasDiff bool, err error) {
	u.log.Debugf("Planning %s", migrateCmd.String())
	if err := migrateCmd.CheckTerraformMigrations(file); err != nil {
		return false, fmt.Errorf("checking workspace: %w", err)
	}
	hasDiff, err = migrateCmd.Plan(cmd.Context(), file, cmd.OutOrStdout())
	if err != nil {
		return hasDiff, fmt.Errorf("planning terraform migrations: %w", err)
	}
	return hasDiff, nil
}

// applyMigration plans and then applies the Terraform migration. The user is asked for confirmation if there are any changes.
// adapted from migrateTerraform().
func (u *tfMigrationClient) applyMigration(cmd *cobra.Command, file file.Handler, migrateCmd upgrade.TfMigrationCmd, flags upgradeApplyFlags) error {
	hasDiff, err := u.planMigration(cmd, file, migrateCmd)
	if err != nil {
		return fmt.Errorf("planning terraform migrations: %w", err)
	}
	if hasDiff {
		// If there are any Terraform migrations to apply, ask for confirmation
		fmt.Fprintf(cmd.OutOrStdout(), "The %s upgrade requires a migration of Constellation cloud resources by applying an updated Terraform template. Please manually review the suggested changes below.\n", migrateCmd.String())
		if !flags.yes {
			ok, err := askToConfirm(cmd, fmt.Sprintf("Do you want to apply the %s?", migrateCmd.String()))
			if err != nil {
				return fmt.Errorf("asking for confirmation: %w", err)
			}
			if !ok {
				cmd.Println("Aborting upgrade.")
				if err := upgrade.CleanUpTerraformMigrations(migrateCmd.UpgradeID(), file); err != nil {
					return fmt.Errorf("cleaning up workspace: %w", err)
				}
				return fmt.Errorf("aborted by user")
			}
		}
		u.log.Debugf("Applying Terraform %s migrations", migrateCmd.String())
		err := migrateCmd.Apply(cmd.Context(), file)
		if err != nil {
			return fmt.Errorf("applying terraform migrations: %w", err)
		}
	} else {
		u.log.Debugf("No Terraform diff detected")
	}
	return nil
}
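The two methods above define the whole contract that tfMigrationClient expects from an upgrade.TfMigrationCmd: String, UpgradeID, CheckTerraformMigrations, Plan, and Apply. A minimal sketch of a conforming no-op command, useful as a test double; the noopMigrationCmd type is hypothetical and not part of this commit:

```go
package cmd

import (
	"context"
	"io"

	"github.com/edgelesssys/constellation/v2/internal/file"
)

// noopMigrationCmd is a hypothetical TfMigrationCmd that never has a diff,
// so applyMigration plans it and then skips the confirmation and Apply steps.
type noopMigrationCmd struct {
	upgradeID string
}

func (c *noopMigrationCmd) String() string    { return "no-op migration" }
func (c *noopMigrationCmd) UpgradeID() string { return c.upgradeID }

// CheckTerraformMigrations reports the upgrade workspace as clean.
func (c *noopMigrationCmd) CheckTerraformMigrations(_ file.Handler) error { return nil }

// Plan reports that there is nothing to migrate.
func (c *noopMigrationCmd) Plan(_ context.Context, _ file.Handler, _ io.Writer) (bool, error) {
	return false, nil
}

// Apply is never reached when Plan reports no diff.
func (c *noopMigrationCmd) Apply(_ context.Context, _ file.Handler) error { return nil }
```

With such a command, `applyMigration(cmd, fileHandler, &noopMigrationCmd{upgradeID: "id"}, flags)` plans and returns without prompting the user.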
@@ -70,15 +70,17 @@ func runUpgradeApply(cmd *cobra.Command, _ []string) error {
 	imagefetcher := imagefetcher.New()
 	configFetcher := attestationconfigapi.NewFetcher()

-	applyCmd := upgradeApplyCmd{upgrader: upgrader, log: log, imageFetcher: imagefetcher, configFetcher: configFetcher}
+	applyCmd := upgradeApplyCmd{upgrader: upgrader, log: log, imageFetcher: imagefetcher, configFetcher: configFetcher, migrationExecutor: &tfMigrationClient{log}}
 	return applyCmd.upgradeApply(cmd, fileHandler)
 }

 type upgradeApplyCmd struct {
-	upgrader      cloudUpgrader
-	imageFetcher  imageFetcher
-	configFetcher attestationconfigapi.Fetcher
-	log           debugLog
+	upgrader          cloudUpgrader
+	imageFetcher      imageFetcher
+	configFetcher     attestationconfigapi.Fetcher
+	log               debugLog
+	migrationExecutor tfMigrationApplier
+	migrationCmds     []upgrade.TfMigrationCmd
 }

 func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, fileHandler file.Handler) error {
@@ -109,7 +111,12 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, fileHandler file.Hand
 	if err := u.upgradeAttestConfigIfDiff(cmd, conf.GetAttestationConfig(), flags); err != nil {
 		return fmt.Errorf("upgrading measurements: %w", err)
 	}
+	for _, migrationCmd := range u.migrationCmds {
+		if err := u.migrationExecutor.applyMigration(cmd, fileHandler, migrationCmd, flags); err != nil {
+			return fmt.Errorf("executing %s migration: %w", migrationCmd.String(), err)
+		}
+	}
+	// not moving existing Terraform migrator because of planned apply refactor
 	if err := u.migrateTerraform(cmd, u.imageFetcher, conf, flags); err != nil {
 		return fmt.Errorf("performing Terraform migrations: %w", err)
 	}
@@ -130,7 +137,7 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, fileHandler file.Hand

 	if conf.GetProvider() == cloudprovider.Azure || conf.GetProvider() == cloudprovider.GCP || conf.GetProvider() == cloudprovider.AWS {
 		var upgradeErr *compatibility.InvalidUpgradeError
-		err = u.handleServiceUpgrade(cmd, conf, flags)
+		err = u.handleServiceUpgrade(cmd, conf, idFile, flags)
 		switch {
 		case errors.As(err, &upgradeErr):
 			cmd.PrintErrln(err)
@@ -293,8 +300,8 @@ func (u *upgradeApplyCmd) upgradeAttestConfigIfDiff(cmd *cobra.Command, newConfi
 	return nil
 }

-func (u *upgradeApplyCmd) handleServiceUpgrade(cmd *cobra.Command, conf *config.Config, flags upgradeApplyFlags) error {
-	err := u.upgrader.UpgradeHelmServices(cmd.Context(), conf, flags.upgradeTimeout, helm.DenyDestructive, flags.force)
+func (u *upgradeApplyCmd) handleServiceUpgrade(cmd *cobra.Command, conf *config.Config, idFile clusterid.File, flags upgradeApplyFlags) error {
+	err := u.upgrader.UpgradeHelmServices(cmd.Context(), conf, idFile, flags.upgradeTimeout, helm.DenyDestructive, flags.force)
 	if errors.Is(err, helm.ErrConfirmationMissing) {
 		if !flags.yes {
 			cmd.PrintErrln("WARNING: Upgrading cert-manager will destroy all custom resources you have manually created that are based on the current version of cert-manager.")
@@ -307,7 +314,7 @@ func (u *upgradeApplyCmd) handleServiceUpgrade(cmd *cobra.Command, conf *config.
 				return nil
 			}
 		}
-		err = u.upgrader.UpgradeHelmServices(cmd.Context(), conf, flags.upgradeTimeout, helm.AllowDestructive, flags.force)
+		err = u.upgrader.UpgradeHelmServices(cmd.Context(), conf, idFile, flags.upgradeTimeout, helm.AllowDestructive, flags.force)
 	}

 	return err
@@ -362,7 +369,7 @@ type upgradeApplyFlags struct {

 type cloudUpgrader interface {
 	UpgradeNodeVersion(ctx context.Context, conf *config.Config, force bool) error
-	UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool, force bool) error
+	UpgradeHelmServices(ctx context.Context, config *config.Config, idFile clusterid.File, timeout time.Duration, allowDestructive bool, force bool) error
 	UpdateAttestationConfig(ctx context.Context, newConfig config.AttestationCfg) error
 	ExtendClusterConfigCertSANs(ctx context.Context, alternativeNames []string) error
 	GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, *corev1.ConfigMap, error)
@@ -372,3 +379,7 @@ type cloudUpgrader interface {
 	CleanUpTerraformMigrations() error
 	AddManualStateMigration(migration terraform.StateMigration)
 }
+
+type tfMigrationApplier interface {
+	applyMigration(cmd *cobra.Command, file file.Handler, migrateCmd upgrade.TfMigrationCmd, flags upgradeApplyFlags) error
+}
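In this commit the migrationCmds slice stays empty for upgrade apply (the commit message notes the IAM upgrade is disabled there), but the loop above shows how additional Terraform migrations would be driven. A hedged sketch of the wiring, mirroring the iamMigrateCmd setup that upgrade check adds below; treat it as illustrative, not as code from this change:

```go
// Inside runUpgradeApply, after constructing the upgrader (illustrative only):
iamMigrateCmd, err := upgrade.NewIAMMigrateCmd(cmd.Context(), upgrader.GetUpgradeID(), cloudprovider.AWS, terraform.LogLevelDebug)
if err != nil {
	return fmt.Errorf("setting up IAM migration command: %w", err)
}
applyCmd := upgradeApplyCmd{
	upgrader:          upgrader,
	log:               log,
	imageFetcher:      imagefetcher,
	configFetcher:     configFetcher,
	migrationExecutor: &tfMigrationClient{log},
	// applyMigration would then plan, confirm, and apply this command.
	migrationCmds: []upgrade.TfMigrationCmd{iamMigrateCmd},
}
```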
@@ -24,6 +24,7 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/file"
 	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/spf13/afero"
+	"github.com/spf13/cobra"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
@@ -143,7 +144,8 @@ func TestUpgradeApply(t *testing.T) {
 			require.NoError(handler.WriteYAML(constants.ConfigFilename, cfg))
 			require.NoError(handler.WriteJSON(constants.ClusterIDsFileName, clusterid.File{}))

-			upgrader := upgradeApplyCmd{upgrader: tc.upgrader, log: logger.NewTest(t), imageFetcher: tc.fetcher, configFetcher: stubAttestationFetcher{}}
+			upgrader := upgradeApplyCmd{upgrader: tc.upgrader, log: logger.NewTest(t), imageFetcher: tc.fetcher, configFetcher: stubAttestationFetcher{}, migrationExecutor: &migrationExecutorPlaceholder{}}
+
 			err := upgrader.upgradeApply(cmd, handler)
 			if tc.wantErr {
 				assert.Error(err)
@@ -154,6 +156,12 @@ func TestUpgradeApply(t *testing.T) {
 	}
 }

+type migrationExecutorPlaceholder struct{}
+
+func (d *migrationExecutorPlaceholder) applyMigration(_ *cobra.Command, _ file.Handler, _ upgrade.TfMigrationCmd, _ upgradeApplyFlags) error {
+	return nil
+}
+
 type stubUpgrader struct {
 	currentConfig  config.AttestationCfg
 	nodeVersionErr error
@@ -165,11 +173,15 @@ type stubUpgrader struct {
 	cleanTerraformErr error
 }

+func (u stubUpgrader) GetUpgradeID() string {
+	return "test-upgrade"
+}
+
 func (u stubUpgrader) UpgradeNodeVersion(_ context.Context, _ *config.Config, _ bool) error {
 	return u.nodeVersionErr
 }

-func (u stubUpgrader) UpgradeHelmServices(_ context.Context, _ *config.Config, _ time.Duration, _, _ bool) error {
+func (u stubUpgrader) UpgradeHelmServices(_ context.Context, _ *config.Config, _ clusterid.File, _ time.Duration, _, _ bool) error {
 	return u.helmErr
 }
@@ -64,20 +64,24 @@ func runUpgradeCheck(cmd *cobra.Command, _ []string) error {
 		return fmt.Errorf("creating logger: %w", err)
 	}
 	defer log.Sync()
-	fileHandler := file.NewHandler(afero.NewOsFs())
 	flags, err := parseUpgradeCheckFlags(cmd)
 	if err != nil {
 		return err
 	}
+	fileHandler := file.NewHandler(afero.NewOsFs())
 	checker, err := kubernetes.NewUpgrader(cmd.Context(), cmd.OutOrStdout(), fileHandler, log, kubernetes.UpgradeCmdKindCheck)
 	if err != nil {
-		return err
+		return fmt.Errorf("setting up Kubernetes upgrader: %w", err)
 	}
 	versionfetcher := versionsapi.NewFetcher()
 	rekor, err := sigstore.NewRekor()
 	if err != nil {
 		return fmt.Errorf("constructing Rekor client: %w", err)
 	}
+	iamMigrateCmd, err := upgrade.NewIAMMigrateCmd(cmd.Context(), checker.GetUpgradeID(), cloudprovider.AWS, terraform.LogLevelDebug)
+	if err != nil {
+		return fmt.Errorf("setting up IAM migration command: %w", err)
+	}
 	up := &upgradeCheckCmd{
 		canUpgradeCheck: featureset.CanUpgradeCheck,
 		collect: &versionCollector{
@@ -93,9 +97,11 @@ func runUpgradeCheck(cmd *cobra.Command, _ []string) error {
 			log:         log,
 			versionsapi: versionfetcher,
 		},
-		checker:      checker,
-		imagefetcher: imagefetcher.New(),
-		log:          log,
+		checker:       checker,
+		imagefetcher:  imagefetcher.New(),
+		log:           log,
+		iamMigrateCmd: iamMigrateCmd,
+		planExecutor:  &tfMigrationClient{log},
 	}

 	return up.upgradeCheck(cmd, fileHandler, attestationconfigapi.NewFetcher(), flags)
@@ -142,12 +148,18 @@ func parseUpgradeCheckFlags(cmd *cobra.Command) (upgradeCheckFlags, error) {
 	}, nil
 }

+type tfPlanner interface {
+	planMigration(cmd *cobra.Command, file file.Handler, migrateCmd upgrade.TfMigrationCmd) (hasDiff bool, err error)
+}
+
 type upgradeCheckCmd struct {
 	canUpgradeCheck bool
 	collect         collector
 	checker         upgradeChecker
 	imagefetcher    imageFetcher
 	log             debugLog
+	iamMigrateCmd   upgrade.TfMigrationCmd
+	planExecutor    tfPlanner
 }

 // upgradePlan plans an upgrade of a Constellation cluster.
@@ -192,6 +204,7 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fileHandler file.Hand
 	newServices := supported.service
 	if err := compatibility.IsValidUpgrade(current.service, supported.service); err != nil {
 		newServices = ""
+		u.log.Debugf("No valid service upgrades are available from %q to %q. The minor version can only drift by 1.\n", current.service, supported.service)
 	}

 	newKubernetes := filterK8sUpgrades(current.k8s, supported.k8s)
@@ -203,7 +216,22 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fileHandler file.Hand
 		return err
 	}

+	cmd.Println("The following IAM migrations are available with this CLI:")
+	u.log.Debugf("Planning IAM migrations")
+	if u.iamMigrateCmd != nil {
+		hasIAMDiff, err := u.planExecutor.planMigration(cmd, fileHandler, u.iamMigrateCmd)
+		if err != nil {
+			return fmt.Errorf("planning IAM migration: %w", err)
+		}
+		if !hasIAMDiff {
+			cmd.Println(" No IAM migrations are available.")
+		}
+	}
+
+	u.log.Debugf("Planning Terraform migrations")
 	if err := u.checker.CheckTerraformMigrations(); err != nil {
 		return fmt.Errorf("checking workspace: %w", err)
 	}

 	// TODO(AB#3248): Remove this migration after we can assume that all existing clusters have been migrated.
 	var awsZone string
@@ -310,6 +310,21 @@ go_library(
         "charts/edgeless/constellation-services/charts/yawol-config/templates/secret.yaml",
         "charts/edgeless/constellation-services/charts/yawol-config/values.schema.json",
         "charts/edgeless/constellation-services/charts/yawol-config/values.yaml",
+        "charts/aws-load-balancer-controller/.helmignore",
+        "charts/aws-load-balancer-controller/Chart.yaml",
+        "charts/aws-load-balancer-controller/README.md",
+        "charts/aws-load-balancer-controller/crds/crds.yaml",
+        "charts/aws-load-balancer-controller/templates/NOTES.txt",
+        "charts/aws-load-balancer-controller/templates/_helpers.tpl",
+        "charts/aws-load-balancer-controller/templates/deployment.yaml",
+        "charts/aws-load-balancer-controller/templates/ingressclass.yaml",
+        "charts/aws-load-balancer-controller/templates/pdb.yaml",
+        "charts/aws-load-balancer-controller/templates/rbac.yaml",
+        "charts/aws-load-balancer-controller/templates/service.yaml",
+        "charts/aws-load-balancer-controller/templates/serviceaccount.yaml",
+        "charts/aws-load-balancer-controller/templates/servicemonitor.yaml",
+        "charts/aws-load-balancer-controller/templates/webhook.yaml",
+        "charts/aws-load-balancer-controller/values.yaml",
         "charts/edgeless/csi/Chart.yaml",
         "charts/edgeless/csi/charts/azuredisk-csi-driver/Chart.yaml",
         "charts/edgeless/csi/charts/azuredisk-csi-driver/templates/_helpers.tpl",
@@ -399,6 +414,7 @@ go_library(
     importpath = "github.com/edgelesssys/constellation/v2/cli/internal/helm",
     visibility = ["//cli:__subpackages__"],
     deps = [
+        "//cli/internal/clusterid",
         "//cli/internal/helm/imageversion",
         "//internal/cloud/cloudprovider",
         "//internal/compatibility",
@@ -434,6 +450,7 @@ go_test(
     data = glob(["testdata/**"]),
     embed = [":helm"],
     deps = [
+        "//cli/internal/clusterid",
        "//internal/attestation/idkeydigest",
         "//internal/attestation/measurements",
         "//internal/cloud/cloudprovider",
@@ -0,0 +1,25 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
crds/kustomization.yaml
test.yaml
@@ -0,0 +1,22 @@
apiVersion: v2
name: aws-load-balancer-controller
description: AWS Load Balancer Controller Helm chart for Kubernetes
version: 1.5.4
appVersion: v2.5.3
home: https://github.com/aws/eks-charts
icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
sources:
  - https://github.com/aws/eks-charts
maintainers:
  - name: kishorj
    url: https://github.com/kishorj
    email: kishorj@users.noreply.github.com
  - name: m00nf1sh
    url: https://github.com/m00nf1sh
    email: m00nf1sh@users.noreply.github.com
keywords:
  - eks
  - alb
  - load balancer
  - ingress
  - nlb
258
cli/internal/helm/charts/aws-load-balancer-controller/README.md
Normal file
258
cli/internal/helm/charts/aws-load-balancer-controller/README.md
Normal file
@ -0,0 +1,258 @@
|
||||
# AWS Load Balancer Controller
|
||||
|
||||
AWS Load Balancer controller Helm chart for Kubernetes
|
||||
|
||||
## TL;DR:
|
||||
```sh
|
||||
helm repo add eks https://aws.github.io/eks-charts
|
||||
# If using IAM Roles for service account install as follows - NOTE: you need to specify both of the chart values `serviceAccount.create=false` and `serviceAccount.name=aws-load-balancer-controller`
|
||||
helm install aws-load-balancer-controller eks/aws-load-balancer-controller --set clusterName=my-cluster -n kube-system --set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller
|
||||
# If not using IAM Roles for service account
|
||||
helm install aws-load-balancer-controller eks/aws-load-balancer-controller --set clusterName=my-cluster -n kube-system
|
||||
```
|
||||
|
||||
## Introduction
|
||||
AWS Load Balancer controller manages the following AWS resources
|
||||
- Application Load Balancers to satisfy Kubernetes ingress objects
|
||||
- Network Load Balancers to satisfy Kubernetes service objects of type LoadBalancer with appropriate annotations
|
||||
|
||||
## Security updates
|
||||
**Note**: Deployed chart does not receive security updates automatically. You need to manually upgrade to a newer chart.
|
||||
#### Node isolation
|
||||
As a security best practice, we recommend isolating the controller deployment pods to specific node groups which run critical components. The helm chart provides parameters ```nodeSelector```, ```tolerations``` and ```affinity``` to configure node isolation. For more information, please refer to the guidance [here](https://aws.github.io/aws-eks-best-practices/security/docs/multitenancy/#isolating-tenant-workloads-to-specific-nodes).
|
||||
|
||||
## Prerequisites
|
||||
- Kubernetes >= 1.19
|
||||
- IAM permissions
|
||||
- Helm v3
|
||||
- Optional dependencies
|
||||
- cert-manager
|
||||
- Prometheus Operator
|
||||
|
||||
The controller runs on the worker nodes, so it needs access to the AWS ALB/NLB resources via IAM permissions. The
|
||||
IAM permissions can either be setup via IAM roles for ServiceAccount or can be attached directly to the worker node IAM roles.
|
||||
|
||||
#### Setup IAM for ServiceAccount
|
||||
1. Create IAM OIDC provider
|
||||
```
|
||||
eksctl utils associate-iam-oidc-provider \
|
||||
--region <aws-region> \
|
||||
--cluster <your-cluster-name> \
|
||||
--approve
|
||||
```
|
||||
1. Download IAM policy for the AWS Load Balancer Controller
|
||||
```
|
||||
curl -o iam-policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/main/docs/install/iam_policy.json
|
||||
```
|
||||
1. Create an IAM policy called AWSLoadBalancerControllerIAMPolicy
|
||||
```
|
||||
aws iam create-policy \
|
||||
--policy-name AWSLoadBalancerControllerIAMPolicy \
|
||||
--policy-document file://iam-policy.json
|
||||
```
|
||||
Take note of the policy ARN that is returned
|
||||
|
||||
1. Create a IAM role and ServiceAccount for the Load Balancer controller, use the ARN from the step above
|
||||
```
|
||||
eksctl create iamserviceaccount \
|
||||
--cluster=<cluster-name> \
|
||||
--namespace=kube-system \
|
||||
--name=aws-load-balancer-controller \
|
||||
--attach-policy-arn=arn:aws:iam::<AWS_ACCOUNT_ID>:policy/AWSLoadBalancerControllerIAMPolicy \
|
||||
--approve
|
||||
```
|
||||
#### Setup IAM manually
|
||||
If not setting up IAM for ServiceAccount, apply the IAM policies from the following URL at minimum.
|
||||
```
|
||||
https://raw.githubusercontent.com/kubernetes-sigs/aws-alb-ingress-controller/main/docs/install/iam_policy.json
|
||||
```
|
||||
|
||||
#### Upgrading from ALB ingress controller
|
||||
If migrating from ALB ingress controller, grant [additional IAM permissions](https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/main/docs/install/iam_policy_v1_to_v2_additional.json).
|
||||
|
||||
#### Upgrading from AWS Load Balancer controller v2.1.3 and earlier
|
||||
- Additional IAM permissions required, ensure you have granted the [required IAM permissions](https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/main/docs/install/iam_policy.json).
|
||||
- CRDs need to be updated as follows
|
||||
```shell script
|
||||
kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master"
|
||||
```
|
||||
- you can run helm upgrade without uninstalling the old chart completely
|
||||
|
||||
#### Installing cert-manager
|
||||
|
||||
If you are setting `enableCertManager: true` you need to have installed cert-manager and it's CRDs before installing this chart; to install [cert-manager](https://artifacthub.io/packages/helm/cert-manager/cert-manager) follow the installation guide.
|
||||
|
||||
The controller helm chart requires the cert-manager with apiVersion `cert-manager.io/v1`.
|
||||
|
||||
Set `cluster.dnsDomain` (default: `cluster.local`) to the actual DNS domain of your cluster to include the FQDN in requested TLS certificates.
|
||||
|
||||
#### Installing the Prometheus Operator
|
||||
|
||||
If you are setting `serviceMonitor.enabled: true` you need to have installed the Prometheus Operator ServiceMonitor CRD before installing this chart and have the operator running to collect the metrics. The easiest way to do this is to install the [kube-prometheus-stack](https://artifacthub.io/packages/helm/prometheus-community/kube-prometheus-stack) Helm chart using the installation guide.
|
||||
|
||||
## Installing the Chart
|
||||
**Note**: You need to uninstall aws-alb-ingress-controller. Please refer to the [upgrade](#Upgrade) section below before you proceed.
|
||||
**Note**: Starting chart version 1.4.1, you need to explicitly set `clusterSecretsPermissions.allowAllSecrets` to true to grant the controller permission to access all secrets for OIDC feature. We recommend configuring access to individual secrets resource separately [[link](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/examples/secrets_access/)].
|
||||
|
||||
Add the EKS repository to Helm:
|
||||
```shell script
|
||||
helm repo add eks https://aws.github.io/eks-charts
|
||||
```
|
||||
|
||||
Install the TargetGroupBinding CRDs:
|
||||
|
||||
```shell script
|
||||
kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master"
|
||||
```
|
||||
|
||||
Install the AWS Load Balancer controller, if using iamserviceaccount
|
||||
```shell script
|
||||
# NOTE: The clusterName value must be set either via the values.yaml or the Helm command line. The <k8s-cluster-name> in the command
|
||||
# below should be replaced with name of your k8s cluster before running it.
|
||||
helm upgrade -i aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system --set clusterName=<k8s-cluster-name> --set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller
|
||||
```
|
||||
|
||||
Install the AWS Load Balancer controller, if not using iamserviceaccount
|
||||
```shell script
|
||||
helm upgrade -i aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system --set clusterName=<k8s-cluster-name>
|
||||
```
|
||||
|
||||
## Upgrade
|
||||
The new controller is backwards compatible with the existing ingress objects. However, it will not coexist with the older aws-alb-ingress-controller.
|
||||
The old controller must be uninstalled completely before installing the new version.
|
||||
### Kubectl installation
|
||||
If you had installed the previous version via kubectl, uninstall as follows
|
||||
```shell script
|
||||
$ kubectl delete deployment -n kube-system alb-ingress-controller
|
||||
$ kubectl delete clusterRole alb-ingress-controller
|
||||
$ kubectl delete ClusterRoleBinding alb-ingress-controller
|
||||
$ kubectl delete ServiceAccount -n kube-system alb-ingress-controller
|
||||
|
||||
# Alternatively you can find the version of the controller and delete as follows
|
||||
$ kubectl describe deployment -n kube-system alb-ingress-controller |grep Image
|
||||
Image: docker.io/amazon/aws-alb-ingress-controller:v1.1.8
|
||||
# You can delete the deployment now
|
||||
$ kubectl delete deployment -n kube-system alb-ingress-controller
|
||||
# In this case, the version is v1.1.8, the rbac roles can be removed as follows
|
||||
$ kubectl delete -f https://raw.githubusercontent.com/kubernetes-sigs/aws-alb-ingress-controller/v1.1.8/docs/examples/rbac-role.yaml
|
||||
```
|
||||
### Helm installation
|
||||
If you had installed the incubator/aws-alb-ingress-controller Helm chart, uninstall as follows
|
||||
```shell script
|
||||
# NOTE: If installed under a different chart name and namespace, please specify as appropriate
|
||||
$ helm delete aws-alb-ingress-controller -n kube-system
|
||||
```
|
||||
|
||||
If you had installed the 0.1.x version of eks-charts/aws-load-balancer-controller chart earlier, the upgrade to chart version 1.0.0 will
|
||||
not work due to incompatibility of the webhook api version, uninstall as follows
|
||||
```shell script
|
||||
$ helm delete aws-load-balancer-controller -n kube-system
|
||||
```
|
||||
|
||||
## Uninstalling the Chart
|
||||
```sh
|
||||
helm delete aws-load-balancer-controller -n kube-system
|
||||
```
|
||||
|
||||
If you setup IAM Roles for ServiceAccount, you can cleanup as follows
|
||||
```
|
||||
eksctl delete iamserviceaccount --cluster <cluster-name> --namespace kube-system --name aws-load-balancer-controller
|
||||
```
|
||||
|
||||
## HA configuration

Chart release v1.2.0 and later enables high availability configuration by default.

- The default number of replicas is 2. You can pass the `--set replicaCount=1` flag during chart installation to disable this. Due to leader election, only one controller actively reconciles resources.
- The default priority class for the controller pods is `system-cluster-critical`.
- Soft pod anti-affinity is enabled for controller pods with `topologyKey: kubernetes.io/hostname`, unless you configure a custom `affinity` or set `configureDefaultAffinity` to `false`.
- A pod disruption budget (PDB) is not set by default. If you plan on running at least 2 controller pods, you can pass the `--set podDisruptionBudget.maxUnavailable=1` flag during chart installation, as shown in the sketch after this list.

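For example, an installation that keeps both replicas and adds a PDB might look like this (a minimal sketch; the cluster name is a placeholder):

```sh
# Run 2 replicas (the default) and allow at most one of them to be disrupted at a time
helm upgrade -i aws-load-balancer-controller eks/aws-load-balancer-controller \
  -n kube-system \
  --set clusterName=<k8s-cluster-name> \
  --set replicaCount=2 \
  --set podDisruptionBudget.maxUnavailable=1
```
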
## Configuration

The following table lists the configurable parameters of the chart and their default values.
The default values set by the application itself can be confirmed [here](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/deploy/configurations/#controller-configuration-options).

| Parameter | Description | Default |
|-----------|-------------|---------|
| `image.repository` | image repository | `public.ecr.aws/eks/aws-load-balancer-controller` |
| `image.tag` | image tag | `<VERSION>` |
| `image.pullPolicy` | image pull policy | `IfNotPresent` |
| `clusterName` | Kubernetes cluster name | None |
| `cluster.dnsDomain` | DNS domain of the Kubernetes cluster, included in TLS certificate requests | `cluster.local` |
| `securityContext` | Security context for the controller container | `{}` |
| `resources` | Controller pod resource requests & limits | `{}` |
| `priorityClassName` | Controller pod priority class | `system-cluster-critical` |
| `nodeSelector` | Node labels for controller pod assignment | `{}` |
| `tolerations` | Controller pod tolerations for taints | `{}` |
| `affinity` | Affinity for pod assignment | `{}` |
| `configureDefaultAffinity` | Configure soft pod anti-affinity if custom affinity is not configured | `true` |
| `topologySpreadConstraints` | Topology spread constraints for pod assignment | `{}` |
| `deploymentAnnotations` | Annotations to add to the deployment | `{}` |
| `podAnnotations` | Annotations to add to each pod | `{}` |
| `podLabels` | Labels to add to each pod | `{}` |
| `additionalLabels` | Labels to add to all components | `{}` |
| `rbac.create` | If `true`, create and use RBAC resources | `true` |
| `serviceAccount.annotations` | Optional annotations to add to the service account | None |
| `serviceAccount.automountServiceAccountToken` | Automount API credentials for the service account | `true` |
| `serviceAccount.imagePullSecrets` | List of image pull secrets to add to the service account | `[]` |
| `serviceAccount.create` | If `true`, create a new service account | `true` |
| `serviceAccount.name` | Service account to be used | None |
| `terminationGracePeriodSeconds` | Time period for the controller pod to do a graceful shutdown | `10` |
| `ingressClass` | The ingress class to satisfy | `alb` |
| `createIngressClassResource` | Create the ingressClass resource | `true` |
| `ingressClassParams.name` | IngressClassParams resource's name; defaults to the aws-load-balancer-controller's name | None |
| `ingressClassParams.create` | If `true`, create a new ingressClassParams | `true` |
| `ingressClassParams.spec` | IngressClassParams-defined ingress specifications | `{}` |
| `region` | The AWS region for the Kubernetes cluster | None |
| `vpcId` | The VPC ID for the Kubernetes cluster | None |
| `awsApiEndpoints` | Custom AWS API endpoints | None |
| `awsApiThrottle` | Custom AWS API throttle settings | None |
| `awsMaxRetries` | Maximum retries for AWS APIs | None |
| `defaultTargetType` | Default target type. Used as the default value of the `alb.ingress.kubernetes.io/target-type` and `service.beta.kubernetes.io/aws-load-balancer-nlb-target-type` annotations. Possible values are `ip` and `instance`. | `instance` |
| `enablePodReadinessGateInject` | If enabled, the targetHealth readiness gate is injected into the pod spec for matching endpoint pods | None |
| `enableShield` | Enable Shield addon for ALB | None |
| `enableWaf` | Enable WAF addon for ALB | None |
| `enableWafv2` | Enable WAF V2 addon for ALB | None |
| `ingressMaxConcurrentReconciles` | Maximum number of concurrently running reconcile loops for ingress | None |
| `logLevel` | Set the controller log level - info, debug | None |
| `metricsBindAddr` | The address the metric endpoint binds to | `""` |
| `webhookBindPort` | The TCP port the webhook server binds to | None |
| `webhookTLS.caCert` | TLS CA certificate for webhook (auto-generated if not provided) | `""` |
| `webhookTLS.cert` | TLS certificate for webhook (auto-generated if not provided) | `""` |
| `webhookTLS.key` | TLS private key for webhook (auto-generated if not provided) | `""` |
| `webhookNamespaceSelectors` | Namespace selectors for the webhook | None |
| `keepTLSSecret` | Reuse the existing TLS secret during chart upgrade | `true` |
| `serviceAnnotations` | Annotations to be added to the provisioned webhook service resource | `{}` |
| `serviceMaxConcurrentReconciles` | Maximum number of concurrently running reconcile loops for service | None |
| `targetgroupbindingMaxConcurrentReconciles` | Maximum number of concurrently running reconcile loops for targetGroupBinding | None |
| `targetgroupbindingMaxExponentialBackoffDelay` | Maximum duration of exponential backoff for targetGroupBinding reconcile failures | None |
| `syncPeriod` | Period at which the controller forces the repopulation of its local object stores | None |
| `watchNamespace` | Namespace the controller watches for updates to Kubernetes objects; if empty, all namespaces are watched | None |
| `disableIngressClassAnnotation` | Disables the usage of the kubernetes.io/ingress.class annotation | None |
| `disableIngressGroupNameAnnotation` | Disables the usage of the alb.ingress.kubernetes.io/group.name annotation | None |
| `defaultSSLPolicy` | Specifies the default SSL policy to use for HTTPS or TLS listeners | None |
| `externalManagedTags` | Specifies the list of tag keys on AWS resources that are managed externally | `[]` |
| `livenessProbe` | Liveness probe settings for the controller | (see `values.yaml`) |
| `env` | Environment variables to set for the aws-load-balancer-controller pod | None |
| `hostNetwork` | If `true`, use hostNetwork | `false` |
| `dnsPolicy` | Set dnsPolicy if required | `ClusterFirst` |
| `extraVolumeMounts` | Extra volume mounts for the pod | `[]` |
| `extraVolumes` | Extra volumes for the pod | `[]` |
| `defaultTags` | Default tags to apply to all AWS resources managed by this controller | `{}` |
| `replicaCount` | Number of controller pods to run; only one will be active due to leader election | `2` |
| `podDisruptionBudget` | Limit the disruption for controller pods. Requires at least 2 controller replicas and 3 worker nodes | `{}` |
| `updateStrategy` | Defines the update strategy for the deployment | `{}` |
| `enableCertManager` | If enabled, cert-manager issues the webhook certificates instead of the Helm template; requires cert-manager and its CRDs to be installed | `false` |
| `enableEndpointSlices` | If enabled, the controller uses k8s EndpointSlices instead of Endpoints for IP targets | `false` |
| `enableBackendSecurityGroup` | If enabled, the controller uses a shared security group for backend traffic | `true` |
| `backendSecurityGroup` | Backend security group to use instead of the auto-created one if the feature is enabled | `""` |
| `disableRestrictedSecurityGroupRules` | If `true`, the controller does not specify port range restrictions in the backend security group rules | `false` |
| `objectSelector.matchExpressions` | Webhook configuration to select specific pods by specifying the expression to be matched | None |
| `objectSelector.matchLabels` | Webhook configuration to select specific pods by specifying the key/value label pair to be matched | None |
| `serviceMonitor.enabled` | Specifies whether a service monitor should be created; requires the ServiceMonitor CRD to be installed | `false` |
| `serviceMonitor.additionalLabels` | Labels to add to the service monitor | `{}` |
| `serviceMonitor.interval` | Prometheus scrape interval | `1m` |
| `serviceMonitor.namespace` | Namespace in which Prometheus is running | None |
| `clusterSecretsPermissions.allowAllSecrets` | If `true`, the controller has access to all secrets in the cluster | `false` |
| `controllerConfig.featureGates` | Set of `key: value` pairs that describe AWS load balancer controller features | `{}` |
| `ingressClassConfig.default` | If `true`, the ingressclass will be the default class of the cluster | `false` |
| `enableServiceMutatorWebhook` | If `false`, disable the Service Mutator webhook, which makes all new services of type LoadBalancer reconciled by the lb controller | `true` |
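
Parameters can be overridden at install time with `--set`, or collected in a values file passed via `-f`. A minimal sketch (cluster name, region, and VPC ID are placeholders):

```sh
helm upgrade -i aws-load-balancer-controller eks/aws-load-balancer-controller \
  -n kube-system \
  --set clusterName=<k8s-cluster-name> \
  --set region=<aws-region> \
  --set vpcId=<vpc-id> \
  --set logLevel=debug
```
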
@ -0,0 +1,590 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.11.1
  creationTimestamp: null
  name: ingressclassparams.elbv2.k8s.aws
spec:
  group: elbv2.k8s.aws
  names:
    kind: IngressClassParams
    listKind: IngressClassParamsList
    plural: ingressclassparams
    singular: ingressclassparams
  scope: Cluster
  versions:
  - additionalPrinterColumns:
    - description: The Ingress Group name
      jsonPath: .spec.group.name
      name: GROUP-NAME
      type: string
    - description: The AWS Load Balancer scheme
      jsonPath: .spec.scheme
      name: SCHEME
      type: string
    - description: The AWS Load Balancer ipAddressType
      jsonPath: .spec.ipAddressType
      name: IP-ADDRESS-TYPE
      type: string
    - jsonPath: .metadata.creationTimestamp
      name: AGE
      type: date
    name: v1beta1
    schema:
      openAPIV3Schema:
        description: IngressClassParams is the Schema for the IngressClassParams API
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: IngressClassParamsSpec defines the desired state of IngressClassParams
            properties:
              group:
                description: Group defines the IngressGroup for all Ingresses that
                  belong to IngressClass with this IngressClassParams.
                properties:
                  name:
                    description: Name is the name of IngressGroup.
                    type: string
                required:
                - name
                type: object
              inboundCIDRs:
                description: InboundCIDRs specifies the CIDRs that are allowed to
                  access the Ingresses that belong to IngressClass with this IngressClassParams.
                items:
                  type: string
                type: array
              ipAddressType:
                description: IPAddressType defines the ip address type for all Ingresses
                  that belong to IngressClass with this IngressClassParams.
                enum:
                - ipv4
                - dualstack
                type: string
              loadBalancerAttributes:
                description: LoadBalancerAttributes define the custom attributes to
                  LoadBalancers for all Ingress that belong to IngressClass with
                  this IngressClassParams.
                items:
                  description: Attributes defines custom attributes on resources.
                  properties:
                    key:
                      description: The key of the attribute.
                      type: string
                    value:
                      description: The value of the attribute.
                      type: string
                  required:
                  - key
                  - value
                  type: object
                type: array
              namespaceSelector:
                description: NamespaceSelector restrict the namespaces of Ingresses
                  that are allowed to specify the IngressClass with this IngressClassParams.
                  * if absent or present but empty, it selects all namespaces.
                properties:
                  matchExpressions:
                    description: matchExpressions is a list of label selector requirements.
                      The requirements are ANDed.
                    items:
                      description: A label selector requirement is a selector that
                        contains values, a key, and an operator that relates the key
                        and values.
                      properties:
                        key:
                          description: key is the label key that the selector applies
                            to.
                          type: string
                        operator:
                          description: operator represents a key's relationship to
                            a set of values. Valid operators are In, NotIn, Exists
                            and DoesNotExist.
                          type: string
                        values:
                          description: values is an array of string values. If the
                            operator is In or NotIn, the values array must be non-empty.
                            If the operator is Exists or DoesNotExist, the values
                            array must be empty. This array is replaced during a strategic
                            merge patch.
                          items:
                            type: string
                          type: array
                      required:
                      - key
                      - operator
                      type: object
                    type: array
                  matchLabels:
                    additionalProperties:
                      type: string
                    description: matchLabels is a map of {key,value} pairs. A single
                      {key,value} in the matchLabels map is equivalent to an element
                      of matchExpressions, whose key field is "key", the operator
                      is "In", and the values array contains only "value". The requirements
                      are ANDed.
                    type: object
                type: object
                x-kubernetes-map-type: atomic
              scheme:
                description: Scheme defines the scheme for all Ingresses that belong
                  to IngressClass with this IngressClassParams.
                enum:
                - internal
                - internet-facing
                type: string
              sslPolicy:
                description: SSLPolicy specifies the SSL Policy for all Ingresses
                  that belong to IngressClass with this IngressClassParams.
                type: string
              subnets:
                description: Subnets defines the subnets for all Ingresses that belong
                  to IngressClass with this IngressClassParams.
                properties:
                  ids:
                    description: IDs specify the resource IDs of subnets. Exactly
                      one of this or `tags` must be specified.
                    items:
                      description: SubnetID specifies a subnet ID.
                      pattern: subnet-[0-9a-f]+
                      type: string
                    minItems: 1
                    type: array
                  tags:
                    additionalProperties:
                      items:
                        type: string
                      type: array
                    description: Tags specifies subnets in the load balancer's VPC
                      where each tag specified in the map key contains one of the
                      values in the corresponding value list. Exactly one of this
                      or `ids` must be specified.
                    type: object
                type: object
              tags:
                description: Tags defines list of Tags on AWS resources provisioned
                  for Ingresses that belong to IngressClass with this IngressClassParams.
                items:
                  description: Tag defines an AWS Tag on resources.
                  properties:
                    key:
                      description: The key of the tag.
                      type: string
                    value:
                      description: The value of the tag.
                      type: string
                  required:
                  - key
                  - value
                  type: object
                type: array
            type: object
        type: object
    served: true
    storage: true
    subresources: {}
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.11.1
  creationTimestamp: null
  name: targetgroupbindings.elbv2.k8s.aws
spec:
  group: elbv2.k8s.aws
  names:
    kind: TargetGroupBinding
    listKind: TargetGroupBindingList
    plural: targetgroupbindings
    singular: targetgroupbinding
  scope: Namespaced
  versions:
  - additionalPrinterColumns:
    - description: The Kubernetes Service's name
      jsonPath: .spec.serviceRef.name
      name: SERVICE-NAME
      type: string
    - description: The Kubernetes Service's port
      jsonPath: .spec.serviceRef.port
      name: SERVICE-PORT
      type: string
    - description: The AWS TargetGroup's TargetType
      jsonPath: .spec.targetType
      name: TARGET-TYPE
      type: string
    - description: The AWS TargetGroup's Amazon Resource Name
      jsonPath: .spec.targetGroupARN
      name: ARN
      priority: 1
      type: string
    - jsonPath: .metadata.creationTimestamp
      name: AGE
      type: date
    name: v1alpha1
    schema:
      openAPIV3Schema:
        description: TargetGroupBinding is the Schema for the TargetGroupBinding API
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: TargetGroupBindingSpec defines the desired state of TargetGroupBinding
            properties:
              networking:
                description: networking provides the networking setup for ELBV2 LoadBalancer
                  to access targets in TargetGroup.
                properties:
                  ingress:
                    description: List of ingress rules to allow ELBV2 LoadBalancer
                      to access targets in TargetGroup.
                    items:
                      properties:
                        from:
                          description: List of peers which should be able to access
                            the targets in TargetGroup. At least one NetworkingPeer
                            should be specified.
                          items:
                            description: NetworkingPeer defines the source/destination
                              peer for networking rules.
                            properties:
                              ipBlock:
                                description: IPBlock defines an IPBlock peer. If specified,
                                  none of the other fields can be set.
                                properties:
                                  cidr:
                                    description: CIDR is the network CIDR. Both IPV4
                                      or IPV6 CIDR are accepted.
                                    type: string
                                required:
                                - cidr
                                type: object
                              securityGroup:
                                description: SecurityGroup defines a SecurityGroup
                                  peer. If specified, none of the other fields can
                                  be set.
                                properties:
                                  groupID:
                                    description: GroupID is the EC2 SecurityGroupID.
                                    type: string
                                required:
                                - groupID
                                type: object
                            type: object
                          type: array
                        ports:
                          description: List of ports which should be made accessible
                            on the targets in TargetGroup. If ports is empty or unspecified,
                            it defaults to all ports with TCP.
                          items:
                            properties:
                              port:
                                anyOf:
                                - type: integer
                                - type: string
                                description: The port which traffic must match. When
                                  NodePort endpoints(instance TargetType) is used,
                                  this must be a numerical port. When Port endpoints(ip
                                  TargetType) is used, this can be either numerical
                                  or named port on pods. if port is unspecified, it
                                  defaults to all ports.
                                x-kubernetes-int-or-string: true
                              protocol:
                                description: The protocol which traffic must match.
                                  If protocol is unspecified, it defaults to TCP.
                                enum:
                                - TCP
                                - UDP
                                type: string
                            type: object
                          type: array
                      required:
                      - from
                      - ports
                      type: object
                    type: array
                type: object
              serviceRef:
                description: serviceRef is a reference to a Kubernetes Service and
                  ServicePort.
                properties:
                  name:
                    description: Name is the name of the Service.
                    type: string
                  port:
                    anyOf:
                    - type: integer
                    - type: string
                    description: Port is the port of the ServicePort.
                    x-kubernetes-int-or-string: true
                required:
                - name
                - port
                type: object
              targetGroupARN:
                description: targetGroupARN is the Amazon Resource Name (ARN) for
                  the TargetGroup.
                type: string
              targetType:
                description: targetType is the TargetType of TargetGroup. If unspecified,
                  it will be automatically inferred.
                enum:
                - instance
                - ip
                type: string
            required:
            - serviceRef
            - targetGroupARN
            type: object
          status:
            description: TargetGroupBindingStatus defines the observed state of TargetGroupBinding
            properties:
              observedGeneration:
                description: The generation observed by the TargetGroupBinding controller.
                format: int64
                type: integer
            type: object
        type: object
    served: true
    storage: false
    subresources:
      status: {}
  - additionalPrinterColumns:
    - description: The Kubernetes Service's name
      jsonPath: .spec.serviceRef.name
      name: SERVICE-NAME
      type: string
    - description: The Kubernetes Service's port
      jsonPath: .spec.serviceRef.port
      name: SERVICE-PORT
      type: string
    - description: The AWS TargetGroup's TargetType
      jsonPath: .spec.targetType
      name: TARGET-TYPE
      type: string
    - description: The AWS TargetGroup's Amazon Resource Name
      jsonPath: .spec.targetGroupARN
      name: ARN
      priority: 1
      type: string
    - jsonPath: .metadata.creationTimestamp
      name: AGE
      type: date
    name: v1beta1
    schema:
      openAPIV3Schema:
        description: TargetGroupBinding is the Schema for the TargetGroupBinding API
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: TargetGroupBindingSpec defines the desired state of TargetGroupBinding
            properties:
              ipAddressType:
                description: ipAddressType specifies whether the target group is of
                  type IPv4 or IPv6. If unspecified, it will be automatically inferred.
                enum:
                - ipv4
                - ipv6
                type: string
              networking:
                description: networking defines the networking rules to allow ELBV2
                  LoadBalancer to access targets in TargetGroup.
                properties:
                  ingress:
                    description: List of ingress rules to allow ELBV2 LoadBalancer
                      to access targets in TargetGroup.
                    items:
                      description: NetworkingIngressRule defines a particular set
                        of traffic that is allowed to access TargetGroup's targets.
                      properties:
                        from:
                          description: List of peers which should be able to access
                            the targets in TargetGroup. At least one NetworkingPeer
                            should be specified.
                          items:
                            description: NetworkingPeer defines the source/destination
                              peer for networking rules.
                            properties:
                              ipBlock:
                                description: IPBlock defines an IPBlock peer. If specified,
                                  none of the other fields can be set.
                                properties:
                                  cidr:
                                    description: CIDR is the network CIDR. Both IPV4
                                      or IPV6 CIDR are accepted.
                                    type: string
                                required:
                                - cidr
                                type: object
                              securityGroup:
                                description: SecurityGroup defines a SecurityGroup
                                  peer. If specified, none of the other fields can
                                  be set.
                                properties:
                                  groupID:
                                    description: GroupID is the EC2 SecurityGroupID.
                                    type: string
                                required:
                                - groupID
                                type: object
                            type: object
                          type: array
                        ports:
                          description: List of ports which should be made accessible
                            on the targets in TargetGroup. If ports is empty or unspecified,
                            it defaults to all ports with TCP.
                          items:
                            description: NetworkingPort defines the port and protocol
                              for networking rules.
                            properties:
                              port:
                                anyOf:
                                - type: integer
                                - type: string
                                description: The port which traffic must match. When
                                  NodePort endpoints(instance TargetType) is used,
                                  this must be a numerical port. When Port endpoints(ip
                                  TargetType) is used, this can be either numerical
                                  or named port on pods. if port is unspecified, it
                                  defaults to all ports.
                                x-kubernetes-int-or-string: true
                              protocol:
                                description: The protocol which traffic must match.
                                  If protocol is unspecified, it defaults to TCP.
                                enum:
                                - TCP
                                - UDP
                                type: string
                            type: object
                          type: array
                      required:
                      - from
                      - ports
                      type: object
                    type: array
                type: object
              nodeSelector:
                description: node selector for instance type target groups to only
                  register certain nodes
                properties:
                  matchExpressions:
                    description: matchExpressions is a list of label selector requirements.
                      The requirements are ANDed.
                    items:
                      description: A label selector requirement is a selector that
                        contains values, a key, and an operator that relates the key
                        and values.
                      properties:
                        key:
                          description: key is the label key that the selector applies
                            to.
                          type: string
                        operator:
                          description: operator represents a key's relationship to
                            a set of values. Valid operators are In, NotIn, Exists
                            and DoesNotExist.
                          type: string
                        values:
                          description: values is an array of string values. If the
                            operator is In or NotIn, the values array must be non-empty.
                            If the operator is Exists or DoesNotExist, the values
                            array must be empty. This array is replaced during a strategic
                            merge patch.
                          items:
                            type: string
                          type: array
                      required:
                      - key
                      - operator
                      type: object
                    type: array
                  matchLabels:
                    additionalProperties:
                      type: string
                    description: matchLabels is a map of {key,value} pairs. A single
                      {key,value} in the matchLabels map is equivalent to an element
                      of matchExpressions, whose key field is "key", the operator
                      is "In", and the values array contains only "value". The requirements
                      are ANDed.
                    type: object
                type: object
                x-kubernetes-map-type: atomic
              serviceRef:
                description: serviceRef is a reference to a Kubernetes Service and
                  ServicePort.
                properties:
                  name:
                    description: Name is the name of the Service.
                    type: string
                  port:
                    anyOf:
                    - type: integer
                    - type: string
                    description: Port is the port of the ServicePort.
                    x-kubernetes-int-or-string: true
                required:
                - name
                - port
                type: object
              targetGroupARN:
                description: targetGroupARN is the Amazon Resource Name (ARN) for
                  the TargetGroup.
                minLength: 1
                type: string
              targetType:
                description: targetType is the TargetType of TargetGroup. If unspecified,
                  it will be automatically inferred.
                enum:
                - instance
                - ip
                type: string
            required:
            - serviceRef
            - targetGroupARN
            type: object
          status:
            description: TargetGroupBindingStatus defines the observed state of TargetGroupBinding
            properties:
              observedGeneration:
                description: The generation observed by the TargetGroupBinding controller.
                format: int64
                type: integer
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
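
For reference, binding an existing Service to a pre-created target group with these CRDs could look like this (a hypothetical sketch; the Service name and target group ARN are placeholders):

```sh
kubectl apply -f - <<EOF
apiVersion: elbv2.k8s.aws/v1beta1
kind: TargetGroupBinding
metadata:
  name: my-tgb
spec:
  serviceRef:
    name: my-service   # existing Service to expose
    port: 80
  targetGroupARN: <target-group-arn>
EOF
```
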
@ -0,0 +1 @@
AWS Load Balancer controller installed!
@ -0,0 +1,129 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "aws-load-balancer-controller.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "aws-load-balancer-controller.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "aws-load-balancer-controller.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Chart name prefix for resource names
Strip the "-controller" suffix from the default .Chart.Name if the nameOverride is not specified.
This enables using a shorter name for the resources, for example aws-load-balancer-webhook.
*/}}
{{- define "aws-load-balancer-controller.namePrefix" -}}
{{- $defaultNamePrefix := .Chart.Name | trimSuffix "-controller" -}}
{{- default $defaultNamePrefix .Values.nameOverride | trunc 42 | trimSuffix "-" -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "aws-load-balancer-controller.labels" -}}
helm.sh/chart: {{ include "aws-load-balancer-controller.chart" . }}
{{ include "aws-load-balancer-controller.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.additionalLabels }}
{{ toYaml .Values.additionalLabels }}
{{- end -}}
{{- end -}}

{{/*
Selector labels
*/}}
{{- define "aws-load-balancer-controller.selectorLabels" -}}
app.kubernetes.io/name: {{ include "aws-load-balancer-controller.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "aws-load-balancer-controller.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "aws-load-balancer-controller.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

{{/*
Create the name of the webhook service
*/}}
{{- define "aws-load-balancer-controller.webhookService" -}}
{{- printf "%s-webhook-service" (include "aws-load-balancer-controller.namePrefix" .) -}}
{{- end -}}

{{/*
Create the name of the webhook cert secret
*/}}
{{- define "aws-load-balancer-controller.webhookCertSecret" -}}
{{- printf "%s-tls" (include "aws-load-balancer-controller.namePrefix" .) -}}
{{- end -}}

{{/*
Generate certificates for webhook
*/}}
{{- define "aws-load-balancer-controller.webhookCerts" -}}
{{- $serviceName := (include "aws-load-balancer-controller.webhookService" .) -}}
{{- $secretName := (include "aws-load-balancer-controller.webhookCertSecret" .) -}}
{{- $secret := lookup "v1" "Secret" .Release.Namespace $secretName -}}
{{- if (and .Values.webhookTLS.caCert .Values.webhookTLS.cert .Values.webhookTLS.key) -}}
caCert: {{ .Values.webhookTLS.caCert | b64enc }}
clientCert: {{ .Values.webhookTLS.cert | b64enc }}
clientKey: {{ .Values.webhookTLS.key | b64enc }}
{{- else if and .Values.keepTLSSecret $secret -}}
caCert: {{ index $secret.data "ca.crt" }}
clientCert: {{ index $secret.data "tls.crt" }}
clientKey: {{ index $secret.data "tls.key" }}
{{- else -}}
{{- $altNames := list (printf "%s.%s" $serviceName .Release.Namespace) (printf "%s.%s.svc" $serviceName .Release.Namespace) (printf "%s.%s.svc.%s" $serviceName .Release.Namespace .Values.cluster.dnsDomain) -}}
{{- $ca := genCA "aws-load-balancer-controller-ca" 3650 -}}
{{- $cert := genSignedCert (include "aws-load-balancer-controller.fullname" .) nil $altNames 3650 $ca -}}
caCert: {{ $ca.Cert | b64enc }}
clientCert: {{ $cert.Cert | b64enc }}
clientKey: {{ $cert.Key | b64enc }}
{{- end -}}
{{- end -}}

{{/*
Convert map to comma separated key=value string
*/}}
{{- define "aws-load-balancer-controller.convertMapToCsv" -}}
{{- range $key, $value := . -}} {{ $key }}={{ $value }}, {{- end -}}
{{- end -}}

{{/*
Create the name of the ingressClassParams
*/}}
{{- define "aws-load-balancer-controller.ingressClassParamsName" -}}
{{ default .Values.ingressClass .Values.ingressClassParams.name }}
{{- end -}}
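
The `convertMapToCsv` helper above is what turns map-style values into a comma-separated `key=value` string for controller flags. For example, tags passed as `defaultTags` at install time (a sketch with placeholder tag keys and values) end up as a single `--default-tags` argument on the controller:

```sh
helm upgrade -i aws-load-balancer-controller eks/aws-load-balancer-controller \
  -n kube-system \
  --set clusterName=<k8s-cluster-name> \
  --set defaultTags.environment=test \
  --set defaultTags.team=platform
# rendered controller argument (trailing comma is trimmed by the caller):
#   --default-tags=environment=test,team=platform
```
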
@ -0,0 +1,215 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "aws-load-balancer-controller.fullname" . }}
  namespace: {{ .Release.Namespace }}
  {{- if .Values.deploymentAnnotations }}
  annotations:
    {{- toYaml .Values.deploymentAnnotations | nindent 4 }}
  {{- end }}
  labels:
    {{- include "aws-load-balancer-controller.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "aws-load-balancer-controller.selectorLabels" . | nindent 6 }}
  {{- with .Values.updateStrategy }}
  strategy:
    {{ toYaml . | nindent 4 }}
  {{- end }}
  template:
    metadata:
      labels:
        {{- include "aws-load-balancer-controller.selectorLabels" . | nindent 8 }}
        {{- if .Values.podLabels }}
        {{- toYaml .Values.podLabels | nindent 8 }}
        {{- end }}
      annotations:
        {{- if not .Values.serviceMonitor.enabled }}
        prometheus.io/scrape: "true"
        prometheus.io/port: "{{ (split ":" .Values.metricsBindAddr)._1 | default 8080 }}"
        {{- end }}
        {{- if .Values.podAnnotations }}
        {{- toYaml .Values.podAnnotations | nindent 8 }}
        {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "aws-load-balancer-controller.serviceAccountName" . }}
      volumes:
      - name: cert
        secret:
          defaultMode: 420
          secretName: {{ template "aws-load-balancer-controller.webhookCertSecret" . }}
      {{- with .Values.extraVolumes }}
      {{ toYaml . | nindent 6 }}
      {{- end }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      {{- if .Values.hostNetwork }}
      hostNetwork: true
      {{- end }}
      {{- if .Values.dnsPolicy }}
      dnsPolicy: {{ .Values.dnsPolicy }}
      {{- end }}
      containers:
      - name: {{ .Chart.Name }}
        args:
        - --cluster-name={{ required "Chart cannot be installed without a valid clusterName!" .Values.clusterName }}
        {{- if .Values.ingressClass }}
        - --ingress-class={{ .Values.ingressClass }}
        {{- end }}
        {{- if .Values.region }}
        - --aws-region={{ .Values.region }}
        {{- end }}
        {{- if .Values.vpcId }}
        - --aws-vpc-id={{ .Values.vpcId }}
        {{- end }}
        {{- if .Values.awsApiEndpoints }}
        - --aws-api-endpoints={{ .Values.awsApiEndpoints }}
        {{- end }}
        {{- if .Values.awsApiThrottle }}
        - --aws-api-throttle={{ join "," .Values.awsApiThrottle }}
        {{- end }}
        {{- if .Values.awsMaxRetries }}
        - --aws-max-retries={{ .Values.awsMaxRetries }}
        {{- end }}
        {{- if kindIs "bool" .Values.enablePodReadinessGateInject }}
        - --enable-pod-readiness-gate-inject={{ .Values.enablePodReadinessGateInject }}
        {{- end }}
        {{- if kindIs "bool" .Values.enableShield }}
        - --enable-shield={{ .Values.enableShield }}
        {{- end }}
        {{- if kindIs "bool" .Values.enableWaf }}
        - --enable-waf={{ .Values.enableWaf }}
        {{- end }}
        {{- if kindIs "bool" .Values.enableWafv2 }}
        - --enable-wafv2={{ .Values.enableWafv2 }}
        {{- end }}
        {{- if .Values.metricsBindAddr }}
        - --metrics-bind-addr={{ .Values.metricsBindAddr }}
        {{- end }}
        {{- if .Values.ingressMaxConcurrentReconciles }}
        - --ingress-max-concurrent-reconciles={{ .Values.ingressMaxConcurrentReconciles }}
        {{- end }}
        {{- if .Values.serviceMaxConcurrentReconciles }}
        - --service-max-concurrent-reconciles={{ .Values.serviceMaxConcurrentReconciles }}
        {{- end }}
        {{- if .Values.targetgroupbindingMaxConcurrentReconciles }}
        - --targetgroupbinding-max-concurrent-reconciles={{ .Values.targetgroupbindingMaxConcurrentReconciles }}
        {{- end }}
        {{- if .Values.targetgroupbindingMaxExponentialBackoffDelay }}
        - --targetgroupbinding-max-exponential-backoff-delay={{ .Values.targetgroupbindingMaxExponentialBackoffDelay }}
        {{- end }}
        {{- if .Values.logLevel }}
        - --log-level={{ .Values.logLevel }}
        {{- end }}
        {{- if .Values.webhookBindPort }}
        - --webhook-bind-port={{ .Values.webhookBindPort }}
        {{- end }}
        {{- if .Values.syncPeriod }}
        - --sync-period={{ .Values.syncPeriod }}
        {{- end }}
        {{- if .Values.watchNamespace }}
        - --watch-namespace={{ .Values.watchNamespace }}
        {{- end }}
        {{- if kindIs "bool" .Values.disableIngressClassAnnotation }}
        - --disable-ingress-class-annotation={{ .Values.disableIngressClassAnnotation }}
        {{- end }}
        {{- if kindIs "bool" .Values.disableIngressGroupNameAnnotation }}
        - --disable-ingress-group-name-annotation={{ .Values.disableIngressGroupNameAnnotation }}
        {{- end }}
        {{- if .Values.defaultSSLPolicy }}
        - --default-ssl-policy={{ .Values.defaultSSLPolicy }}
        {{- end }}
        {{- if .Values.externalManagedTags }}
        - --external-managed-tags={{ join "," .Values.externalManagedTags }}
        {{- end }}
        {{- if .Values.defaultTags }}
        - --default-tags={{ include "aws-load-balancer-controller.convertMapToCsv" .Values.defaultTags | trimSuffix "," }}
        {{- end }}
        {{- if kindIs "bool" .Values.enableEndpointSlices }}
        - --enable-endpoint-slices={{ .Values.enableEndpointSlices }}
        {{- end }}
        {{- if kindIs "bool" .Values.enableBackendSecurityGroup }}
        - --enable-backend-security-group={{ .Values.enableBackendSecurityGroup }}
        {{- end }}
        {{- if .Values.backendSecurityGroup }}
        - --backend-security-group={{ .Values.backendSecurityGroup }}
        {{- end }}
        {{- if kindIs "bool" .Values.disableRestrictedSecurityGroupRules }}
        - --disable-restricted-sg-rules={{ .Values.disableRestrictedSecurityGroupRules }}
        {{- end }}
        {{- if .Values.controllerConfig.featureGates }}
        - --feature-gates={{ include "aws-load-balancer-controller.convertMapToCsv" .Values.controllerConfig.featureGates | trimSuffix "," }}
        {{- end }}
        {{- if ne .Values.defaultTargetType "instance" }}
        - --default-target-type={{ .Values.defaultTargetType }}
        {{- end }}
        {{- if .Values.env }}
        env:
        {{- range $key, $value := .Values.env }}
        - name: {{ $key }}
          value: "{{ $value }}"
        {{- end }}
        {{- end }}
        securityContext:
          {{- toYaml .Values.securityContext | nindent 10 }}
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        volumeMounts:
        - mountPath: /tmp/k8s-webhook-server/serving-certs
          name: cert
          readOnly: true
        {{- with .Values.extraVolumeMounts }}
        {{ toYaml . | nindent 8 }}
        {{- end }}
        ports:
        - name: webhook-server
          containerPort: {{ .Values.webhookBindPort | default 9443 }}
          protocol: TCP
        - name: metrics-server
          containerPort: {{ (split ":" .Values.metricsBindAddr)._1 | default 8080 }}
          protocol: TCP
        resources:
          {{- toYaml .Values.resources | nindent 10 }}
        {{- with .Values.livenessProbe }}
        livenessProbe:
          {{- toYaml . | nindent 10 }}
        {{- end }}
      terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- if .Values.affinity }}
      affinity:
        {{- toYaml .Values.affinity | nindent 8 }}
      {{- else if .Values.configureDefaultAffinity }}
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app.kubernetes.io/name
                  operator: In
                  values:
                  - {{ include "aws-load-balancer-controller.name" . }}
              topologyKey: kubernetes.io/hostname
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- if .Values.priorityClassName }}
      priorityClassName: {{ .Values.priorityClassName | quote }}
      {{- end }}
      {{- with .Values.topologySpreadConstraints }}
      topologySpreadConstraints:
        {{- toYaml . | nindent 8 }}
      {{- end }}
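
To inspect which controller arguments a given set of values produces without installing anything, you can render the deployment locally (a sketch; `charts/aws-load-balancer-controller` is a placeholder for wherever the chart lives on disk):

```sh
helm template aws-load-balancer-controller charts/aws-load-balancer-controller \
  --set clusterName=<k8s-cluster-name> \
  --set logLevel=debug | grep -A 20 'args:'
```
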
@ -0,0 +1,41 @@
{{- /*
[caution] AWSLoadBalancerController <= v2.4.2 expects referenced IngressClassParams to be created before IngressClass.
We use a list here to force Helm to create IngressClassParams (if any) before applying any IngressClass changes.
*/}}
{{- if .Values.createIngressClassResource }}
apiVersion: v1
kind: List
metadata:
  name: ingress-class
items:
{{- if .Values.ingressClassParams.create }}
- apiVersion: elbv2.k8s.aws/v1beta1
  kind: IngressClassParams
  metadata:
    name: {{ include "aws-load-balancer-controller.ingressClassParamsName" . }}
    labels:
      {{- include "aws-load-balancer-controller.labels" . | nindent 6 }}
  {{- with .Values.ingressClassParams.spec }}
  spec:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}
- apiVersion: networking.k8s.io/v1
  kind: IngressClass
  metadata:
    name: {{ .Values.ingressClass }}
    labels:
      {{- include "aws-load-balancer-controller.labels" . | nindent 6 }}
    {{- if .Values.ingressClassConfig.default }}
    annotations:
      ingressclass.kubernetes.io/is-default-class: "true"
    {{- end }}
  spec:
    controller: ingress.k8s.aws/alb
    {{- if or .Values.ingressClassParams.name (and .Values.ingressClassParams.create .Values.ingressClassParams.spec) }}
    parameters:
      apiGroup: elbv2.k8s.aws
      kind: IngressClassParams
      name: {{ include "aws-load-balancer-controller.ingressClassParamsName" . }}
    {{- end }}
{{- end }}
@ -0,0 +1,14 @@
{{- if and .Values.podDisruptionBudget (gt (int .Values.replicaCount) 1) }}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: {{ include "aws-load-balancer-controller.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "aws-load-balancer-controller.labels" . | nindent 4 }}
spec:
  selector:
    matchLabels:
      {{- include "aws-load-balancer-controller.selectorLabels" . | nindent 6 }}
  {{- toYaml .Values.podDisruptionBudget | nindent 2 }}
{{- end }}
@ -0,0 +1,104 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ template "aws-load-balancer-controller.fullname" . }}-leader-election-role
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "aws-load-balancer-controller.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
  resources: [configmaps]
  verbs: [create]
- apiGroups: [""]
  resources: [configmaps]
  resourceNames: [aws-load-balancer-controller-leader]
  verbs: [get, patch, update]
- apiGroups:
  - "coordination.k8s.io"
  resources:
  - leases
  verbs:
  - create
- apiGroups:
  - "coordination.k8s.io"
  resources:
  - leases
  resourceNames:
  - aws-load-balancer-controller-leader
  verbs:
  - get
  - update
  - patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ template "aws-load-balancer-controller.fullname" . }}-leader-election-rolebinding
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "aws-load-balancer-controller.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ template "aws-load-balancer-controller.fullname" . }}-leader-election-role
subjects:
- kind: ServiceAccount
  name: {{ template "aws-load-balancer-controller.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ template "aws-load-balancer-controller.fullname" . }}-role
  labels:
    {{- include "aws-load-balancer-controller.labels" . | nindent 4 }}
rules:
- apiGroups: ["elbv2.k8s.aws"]
  resources: [targetgroupbindings]
  verbs: [create, delete, get, list, patch, update, watch]
- apiGroups: ["elbv2.k8s.aws"]
  resources: [ingressclassparams]
  verbs: [get, list, watch]
- apiGroups: [""]
  resources: [events]
  verbs: [create, patch]
- apiGroups: [""]
  resources: [pods]
  verbs: [get, list, watch]
- apiGroups: ["networking.k8s.io"]
  resources: [ingressclasses]
  verbs: [get, list, watch]
- apiGroups: ["", "extensions", "networking.k8s.io"]
  resources: [services, ingresses]
  verbs: [get, list, patch, update, watch]
- apiGroups: [""]
  resources: [nodes, namespaces, endpoints]
  verbs: [get, list, watch]
{{- if .Values.clusterSecretsPermissions.allowAllSecrets }}
- apiGroups: [""]
  resources: [secrets]
  verbs: [get, list, watch]
{{- end }}
- apiGroups: ["elbv2.k8s.aws", "", "extensions", "networking.k8s.io"]
  resources: [targetgroupbindings/status, pods/status, services/status, ingresses/status]
  verbs: [update, patch]
- apiGroups: ["discovery.k8s.io"]
  resources: [endpointslices]
  verbs: [get, list, watch]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ template "aws-load-balancer-controller.fullname" . }}-rolebinding
  labels:
    {{- include "aws-load-balancer-controller.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "aws-load-balancer-controller.fullname" . }}-role
subjects:
- kind: ServiceAccount
  name: {{ template "aws-load-balancer-controller.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
{{- end }}
@ -0,0 +1,41 @@
{{- if .Values.serviceMonitor.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ template "aws-load-balancer-controller.fullname" . }}
  namespace: {{ .Release.Namespace }}
  {{- with .Values.serviceAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  labels:
    {{- include "aws-load-balancer-controller.labels" . | nindent 4 }}
spec:
  ports:
  - port: 8080
    name: metrics-server
    targetPort: metrics-server
  selector:
    {{- include "aws-load-balancer-controller.selectorLabels" . | nindent 4 }}
---
{{- end }}
apiVersion: v1
kind: Service
metadata:
  name: {{ template "aws-load-balancer-controller.webhookService" . }}
  namespace: {{ .Release.Namespace }}
  {{- with .Values.serviceAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  labels:
    {{- include "aws-load-balancer-controller.labels" . | nindent 4 }}
    app.kubernetes.io/component: webhook
    prometheus.io/service-monitor: "false"
spec:
  ports:
  - port: 443
    name: webhook-server
    targetPort: webhook-server
  selector:
    {{- include "aws-load-balancer-controller.selectorLabels" . | nindent 4 }}
@ -0,0 +1,18 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "aws-load-balancer-controller.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "aws-load-balancer-controller.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
{{- with .Values.serviceAccount.imagePullSecrets }}
imagePullSecrets:
  {{ toYaml . }}
{{- end }}
{{- end -}}
@ -0,0 +1,35 @@
{{- if .Values.serviceMonitor.enabled -}}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "aws-load-balancer-controller.fullname" . }}
  {{- if .Values.serviceMonitor.namespace }}
  namespace: {{ .Values.serviceMonitor.namespace }}
  {{- else }}
  namespace: {{ .Release.Namespace | quote }}
  {{- end }}
  labels:
    {{- include "aws-load-balancer-controller.labels" . | nindent 4 }}
    {{- with .Values.serviceMonitor.additionalLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  jobLabel: {{ .Release.Name }}
  namespaceSelector:
    matchNames:
    - {{ .Release.Namespace }}
  selector:
    matchLabels:
      {{- include "aws-load-balancer-controller.selectorLabels" . | nindent 6 }}
    matchExpressions:
    - key: prometheus.io/service-monitor
      operator: NotIn
      values:
      - "false"
  endpoints:
  - port: metrics-server
    path: /metrics
    {{- with .Values.serviceMonitor.interval }}
    interval: {{ . }}
    {{- end }}
{{- end -}}
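
Enabling the ServiceMonitor assumes the Prometheus Operator CRDs are already installed in the cluster; a minimal sketch:

```sh
helm upgrade -i aws-load-balancer-controller eks/aws-load-balancer-controller \
  -n kube-system \
  --set clusterName=<k8s-cluster-name> \
  --set serviceMonitor.enabled=true \
  --set serviceMonitor.interval=30s
```
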
@ -0,0 +1,240 @@
{{ $tls := fromYaml ( include "aws-load-balancer-controller.webhookCerts" . ) }}
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
  {{- if $.Values.enableCertManager }}
  annotations:
    cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ template "aws-load-balancer-controller.namePrefix" . }}-serving-cert
  {{- end }}
  name: {{ include "aws-load-balancer-controller.namePrefix" . }}-webhook
  labels:
    {{- include "aws-load-balancer-controller.labels" . | nindent 4 }}
webhooks:
- clientConfig:
    {{ if not $.Values.enableCertManager -}}
    caBundle: {{ $tls.caCert }}
    {{ end }}
    service:
      name: {{ template "aws-load-balancer-controller.webhookService" . }}
      namespace: {{ $.Release.Namespace }}
      path: /mutate-v1-pod
  failurePolicy: Fail
  name: mpod.elbv2.k8s.aws
  admissionReviewVersions:
  - v1beta1
  namespaceSelector:
    matchExpressions:
    {{ if .Values.webhookNamespaceSelectors }}
    {{ toYaml .Values.webhookNamespaceSelectors | nindent 4 }}
    {{ else }}
    - key: elbv2.k8s.aws/pod-readiness-gate-inject
      operator: In
      values:
      - enabled
    {{ end }}
  objectSelector:
    matchExpressions:
    - key: app.kubernetes.io/name
      operator: NotIn
      values:
      - {{ include "aws-load-balancer-controller.name" . }}
    {{- if .Values.objectSelector.matchExpressions }}
    {{- toYaml .Values.objectSelector.matchExpressions | nindent 4 }}
    {{- end }}
    {{- if .Values.objectSelector.matchLabels }}
    matchLabels:
      {{- toYaml .Values.objectSelector.matchLabels | nindent 6 }}
    {{- end }}
  rules:
  - apiGroups:
    - ""
    apiVersions:
    - v1
    operations:
    - CREATE
    resources:
    - pods
  sideEffects: None
{{- if .Values.enableServiceMutatorWebhook }}
- clientConfig:
    {{ if not $.Values.enableCertManager -}}
    caBundle: {{ $tls.caCert }}
    {{ end }}
    service:
      name: {{ template "aws-load-balancer-controller.webhookService" . }}
      namespace: {{ $.Release.Namespace }}
      path: /mutate-v1-service
  failurePolicy: Fail
  name: mservice.elbv2.k8s.aws
  admissionReviewVersions:
  - v1beta1
  objectSelector:
    matchExpressions:
    - key: app.kubernetes.io/name
      operator: NotIn
      values:
      - {{ include "aws-load-balancer-controller.name" . }}
  rules:
  - apiGroups:
    - ""
    apiVersions:
    - v1
    operations:
    - CREATE
    resources:
    - services
  sideEffects: None
{{- end }}
- clientConfig:
    {{ if not $.Values.enableCertManager -}}
    caBundle: {{ $tls.caCert }}
    {{ end }}
    service:
      name: {{ template "aws-load-balancer-controller.webhookService" . }}
      namespace: {{ $.Release.Namespace }}
      path: /mutate-elbv2-k8s-aws-v1beta1-targetgroupbinding
  failurePolicy: Fail
  name: mtargetgroupbinding.elbv2.k8s.aws
  admissionReviewVersions:
  - v1beta1
  rules:
  - apiGroups:
    - elbv2.k8s.aws
    apiVersions:
    - v1beta1
    operations:
    - CREATE
    - UPDATE
    resources:
    - targetgroupbindings
  sideEffects: None
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  {{- if $.Values.enableCertManager }}
  annotations:
    cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ template "aws-load-balancer-controller.namePrefix" . }}-serving-cert
  {{- end }}
  name: {{ include "aws-load-balancer-controller.namePrefix" . }}-webhook
  labels:
    {{- include "aws-load-balancer-controller.labels" . | nindent 4 }}
webhooks:
- clientConfig:
    {{ if not $.Values.enableCertManager -}}
    caBundle: {{ $tls.caCert }}
    {{ end }}
    service:
      name: {{ template "aws-load-balancer-controller.webhookService" . }}
      namespace: {{ $.Release.Namespace }}
      path: /validate-elbv2-k8s-aws-v1beta1-ingressclassparams
  failurePolicy: Fail
  name: vingressclassparams.elbv2.k8s.aws
  admissionReviewVersions:
  - v1beta1
  objectSelector:
    matchExpressions:
    - key: app.kubernetes.io/name
      operator: NotIn
      values:
      - {{ include "aws-load-balancer-controller.name" . }}
  rules:
  - apiGroups:
    - elbv2.k8s.aws
    apiVersions:
    - v1beta1
    operations:
    - CREATE
    - UPDATE
    resources:
    - ingressclassparams
  sideEffects: None
- clientConfig:
    {{ if not $.Values.enableCertManager -}}
    caBundle: {{ $tls.caCert }}
    {{ end }}
    service:
      name: {{ template "aws-load-balancer-controller.webhookService" . }}
      namespace: {{ $.Release.Namespace }}
      path: /validate-elbv2-k8s-aws-v1beta1-targetgroupbinding
  failurePolicy: Fail
  name: vtargetgroupbinding.elbv2.k8s.aws
  admissionReviewVersions:
  - v1beta1
  rules:
  - apiGroups:
    - elbv2.k8s.aws
    apiVersions:
    - v1beta1
    operations:
    - CREATE
    - UPDATE
    resources:
    - targetgroupbindings
  sideEffects: None
- clientConfig:
    {{ if not $.Values.enableCertManager -}}
    caBundle: {{ $tls.caCert }}
    {{ end }}
    service:
      name: {{ template "aws-load-balancer-controller.webhookService" . }}
      namespace: {{ $.Release.Namespace }}
      path: /validate-networking-v1-ingress
  failurePolicy: Fail
  matchPolicy: Equivalent
  name: vingress.elbv2.k8s.aws
  admissionReviewVersions:
  - v1beta1
  rules:
  - apiGroups:
    - networking.k8s.io
    apiVersions:
    - v1
    operations:
    - CREATE
    - UPDATE
    resources:
    - ingresses
  sideEffects: None
---
{{- if not $.Values.enableCertManager }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "aws-load-balancer-controller.webhookCertSecret" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{ include "aws-load-balancer-controller.labels" . | indent 4 }}
type: kubernetes.io/tls
data:
  ca.crt: {{ $tls.caCert }}
  tls.crt: {{ $tls.clientCert }}
  tls.key: {{ $tls.clientKey }}
{{- else }}
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: {{ template "aws-load-balancer-controller.namePrefix" . }}-serving-cert
  namespace: {{ .Release.Namespace }}
  labels:
    {{ include "aws-load-balancer-controller.labels" . | indent 4 }}
spec:
  dnsNames:
  - {{ template "aws-load-balancer-controller.webhookService" . }}.{{ .Release.Namespace }}.svc
  - {{ template "aws-load-balancer-controller.webhookService" . }}.{{ .Release.Namespace }}.svc.{{ .Values.cluster.dnsDomain }}
  issuerRef:
    kind: Issuer
    name: {{ template "aws-load-balancer-controller.namePrefix" . }}-selfsigned-issuer
  secretName: {{ template "aws-load-balancer-controller.webhookCertSecret" . }}
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: {{ template "aws-load-balancer-controller.namePrefix" . }}-selfsigned-issuer
  namespace: {{ .Release.Namespace }}
  labels:
|
||||
{{ include "aws-load-balancer-controller.labels" . | indent 4 }}
|
||||
spec:
|
||||
selfSigned: {}
|
||||
{{- end }}
|
@ -0,0 +1,353 @@
# Default values for aws-load-balancer-controller.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 2

image:
  repository: public.ecr.aws/eks/aws-load-balancer-controller
  tag: v2.5.3
  pullPolicy: IfNotPresent

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name:
  # Automount API credentials for a Service Account.
  automountServiceAccountToken: true
  # List of image pull secrets to add to the Service Account.
  imagePullSecrets:
    # - name: docker

rbac:
  # Specifies whether rbac resources should be created
  create: true

podSecurityContext:
  fsGroup: 65534

securityContext:
  # capabilities:
  #   drop:
  #   - ALL
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  allowPrivilegeEscalation: false

# Time period for the controller pod to do a graceful shutdown
terminationGracePeriodSeconds: 10

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

# priorityClassName specifies the PriorityClass to indicate the importance of controller pods
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: system-cluster-critical

nodeSelector: {}

tolerations: []

# affinity specifies a custom affinity for the controller pods
affinity: {}

# configureDefaultAffinity specifies whether to configure a default affinity for the controller pods to prevent
# co-location on the same node. This will get ignored if you specify a custom affinity configuration.
configureDefaultAffinity: true

# topologySpreadConstraints is a stable feature of k8s v1.19 which provides the ability to
# control how Pods are spread across your cluster among failure-domains such as regions, zones,
# nodes, and other user-defined topology domains.
#
# more details here: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
topologySpreadConstraints: {}

updateStrategy: {}
  # type: RollingUpdate
  # rollingUpdate:
  #   maxSurge: 1
  #   maxUnavailable: 1

# serviceAnnotations contains annotations to be added to the provisioned webhook service resource
serviceAnnotations: {}

# deploymentAnnotations contains annotations for the controller deployment
deploymentAnnotations: {}

podAnnotations: {}

podLabels: {}

# additionalLabels -- Labels to add to each object of the chart.
additionalLabels: {}

# Enable cert-manager
enableCertManager: false

# The name of the Kubernetes cluster. A non-empty value is required
clusterName:

# cluster contains configurations specific to the kubernetes cluster
cluster:
  # Cluster DNS domain (required for requesting TLS certificates)
  dnsDomain: cluster.local

# The ingress class this controller will satisfy. If not specified, controller will match all
# ingresses without ingress class annotation and ingresses of type alb
ingressClass: alb

# ingressClassParams specify the IngressClassParams that enforce settings for a set of Ingresses when using with ingress Controller.
ingressClassParams:
  create: true
  # The name of ingressClassParams resource will be referred in ingressClass
  name:
  spec: {}
  # Due to dependency issue, the validation webhook ignores this particular ingressClassParams resource.
  # We recommend creating ingressClassParams resources separately after installing this chart and the
  # controller is functional.
  #
  # You can set the specifications in the `helm install` command through `--set` or `--set-string`
  # If you do want to specify in the values.yaml, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'spec:'
  #
  #   namespaceSelector:
  #     matchLabels:
  #   group:
  #   scheme:
  #   ipAddressType:
  #   tags:
  #   loadBalancerAttributes:
  #   - key:
  #     value:

# To use the IngressClass resource instead of the annotation, you need to install the IngressClass resource pointing to the controller first.
# If specified as true, the IngressClass resource will be created.
createIngressClassResource: true

# The AWS region for the kubernetes cluster. Set to use KIAM or kube2iam for example.
region:

# The VPC ID for the Kubernetes cluster. Set this manually when your pods are unable to use the metadata service to determine this automatically
vpcId:

# Custom AWS API Endpoints (serviceID1=URL1,serviceID2=URL2)
awsApiEndpoints:

# awsApiThrottle specifies custom AWS API throttle settings (serviceID1:operationRegex1=rate:burst,serviceID2:operationRegex2=rate:burst)
# example: --set awsApiThrottle="{Elastic Load Balancing v2:RegisterTargets|DeregisterTargets=4:20,Elastic Load Balancing v2:.*=10:40}"
awsApiThrottle:

# Maximum retries for AWS APIs (default 10)
awsMaxRetries:

# Default target type. Used as the default value of the "alb.ingress.kubernetes.io/target-type" and
# "service.beta.kubernetes.io/aws-load-balancer-nlb-target-type" annotations.
# Possible values are "ip" and "instance"
# The value "ip" should be used for ENI-based CNIs, such as the Amazon VPC CNI,
# Calico with encapsulation disabled, or Cilium with masquerading disabled.
# The value "instance" should be used for overlay-based CNIs, such as Calico in VXLAN or IPIP mode or
# Cilium with masquerading enabled.
defaultTargetType: instance

# If enabled, targetHealth readiness gate will get injected to the pod spec for the matching endpoint pods (default true)
enablePodReadinessGateInject:

# Enable Shield addon for ALB (default true)
enableShield:

# Enable WAF addon for ALB (default true)
enableWaf:

# Enable WAF V2 addon for ALB (default true)
enableWafv2:

# Maximum number of concurrently running reconcile loops for ingress (default 3)
ingressMaxConcurrentReconciles:

# Set the controller log level - info (default) or debug
logLevel:

# The address the metric endpoint binds to. (default ":8080")
metricsBindAddr: ""

# The TCP port the Webhook server binds to. (default 9443)
webhookBindPort:

# webhookTLS specifies TLS cert/key for the webhook
webhookTLS:
  caCert:
  cert:
  key:

# array of namespace selectors for the webhook
webhookNamespaceSelectors:
  # - key: elbv2.k8s.aws/pod-readiness-gate-inject
  #   operator: In
  #   values:
  #   - enabled

# keepTLSSecret specifies whether to reuse existing TLS secret for chart upgrade
keepTLSSecret: true

# Maximum number of concurrently running reconcile loops for service (default 3)
serviceMaxConcurrentReconciles:

# Maximum number of concurrently running reconcile loops for targetGroupBinding
targetgroupbindingMaxConcurrentReconciles:

# Maximum duration of exponential backoff for targetGroupBinding reconcile failures
targetgroupbindingMaxExponentialBackoffDelay:

# Period at which the controller forces the repopulation of its local object stores. (default 1h0m0s)
syncPeriod:

# Namespace the controller watches for updates to Kubernetes objects. If empty, all namespaces are watched.
watchNamespace:

# disableIngressClassAnnotation disables the usage of kubernetes.io/ingress.class annotation, false by default
disableIngressClassAnnotation:

# disableIngressGroupNameAnnotation disables the usage of alb.ingress.kubernetes.io/group.name annotation, false by default
disableIngressGroupNameAnnotation:

# defaultSSLPolicy specifies the default SSL policy to use for TLS/HTTPS listeners
defaultSSLPolicy:

# Liveness probe configuration for the controller
livenessProbe:
  failureThreshold: 2
  httpGet:
    path: /healthz
    port: 61779
    scheme: HTTP
  initialDelaySeconds: 30
  timeoutSeconds: 10

# Environment variables to set for aws-load-balancer-controller pod.
# We strongly discourage programming access credentials in the controller environment. You should set up IRSA or
# comparable solutions like kube2iam, kiam etc. instead.
env:
  # ENV_1: ""
  # ENV_2: ""

# Specifies if aws-load-balancer-controller should be started in hostNetwork mode.
#
# This is required if using a custom CNI where the managed control plane nodes are unable to initiate
# network connections to the pods, for example using Calico CNI plugin on EKS. This is not required or
# recommended if using the Amazon VPC CNI plugin.
hostNetwork: false

# Specifies the dnsPolicy that should be used for pods in the deployment
#
# This may need to be changed under certain conditions. For instance, if one uses the cilium CNI
# with certain settings, one may need to set `hostNetwork: true`, and webhooks won't work unless `dnsPolicy`
# is set to `ClusterFirstWithHostNet`. See https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
dnsPolicy:

# extraVolumeMounts are the additional volume mounts. This enables setting up IRSA on non-EKS Kubernetes clusters
extraVolumeMounts:
  # - name: aws-iam-token
  #   mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount
  #   readOnly: true

# extraVolumes for the extraVolumeMounts. Useful to mount a projected service account token for example.
extraVolumes:
  # - name: aws-iam-token
  #   projected:
  #     defaultMode: 420
  #     sources:
  #     - serviceAccountToken:
  #         audience: sts.amazonaws.com
  #         expirationSeconds: 86400
  #         path: token

# defaultTags are the tags to apply to all AWS resources managed by this controller
defaultTags: {}
  # default_tag1: value1
  # default_tag2: value2

# podDisruptionBudget specifies the disruption budget for the controller pods.
# Disruption budget will be configured only when the replicaCount is greater than 1
podDisruptionBudget: {}
# maxUnavailable: 1

# externalManagedTags is the list of tag keys on AWS resources that will be managed externally
externalManagedTags: []

# enableEndpointSlices enables k8s EndpointSlices for IP targets instead of Endpoints (default false)
enableEndpointSlices:

# enableBackendSecurityGroup enables shared security group for backend traffic (default true)
enableBackendSecurityGroup:

# backendSecurityGroup specifies backend security group id (default: controller auto-creates the backend security group)
backendSecurityGroup:

# disableRestrictedSecurityGroupRules specifies whether to disable creating port-range restricted security group rules for traffic
disableRestrictedSecurityGroupRules:

# controllerConfig specifies controller configuration
controllerConfig:
  # featureGates set of key: value pairs that describe AWS load balancer controller features
  featureGates: {}
  # ListenerRulesTagging: true
  # WeightedTargetGroups: true
  # ServiceTypeLoadBalancerOnly: false
  # EndpointsFailOpen: true
  # EnableServiceController: true
  # EnableIPTargetType: true
  # SubnetsClusterTagCheck: true
  # NLBHealthCheckAdvancedConfig: true

# objectSelector for webhook
objectSelector:
  matchExpressions:
  # - key: <key>
  #   operator: <operator>
  #   values:
  #   - <value>
  matchLabels:
  # key: value

serviceMonitor:
  # Specifies whether a service monitor should be created
  enabled: false
  # Labels to add to the service account
  additionalLabels: {}
  # Prometheus scrape interval
  interval: 1m
  # Namespace to create the service monitor in
  namespace:

# clusterSecretsPermissions lets you configure RBAC permissions for secret resources
# Access to secrets resource is required only if you use the OIDC feature, and instead of
# enabling access to all secrets, we recommend configuring namespaced role/rolebinding.
# This option is for backwards compatibility only, and will potentially be deprecated in future.
clusterSecretsPermissions:
  # allowAllSecrets allows the controller to access all secrets in the cluster.
  # This is to get backwards compatible behavior, but *NOT* recommended for security reasons
  allowAllSecrets: false

# ingressClassConfig contains configurations specific to the ingress class
ingressClassConfig:
  default: false

# enableServiceMutatorWebhook allows you to enable the webhook which makes this controller the default for all new services of type LoadBalancer
enableServiceMutatorWebhook: true
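The chart leaves clusterName empty because the controller cannot start without it, so the installer has to supply it together with any scheduling overrides. The standalone Go sketch below is illustrative only (not code from this commit); it shows the usual Helm-style merge semantics where per-install overrides win over the defaults above, which is how the clusterName, nodeSelector, and tolerations that Constellation sets later in loadAWSLBControllerValues take effect.

package main

import "fmt"

// mergeValues is a minimal sketch of deep-merging override values over chart
// defaults; nested maps are merged recursively, scalars are replaced.
func mergeValues(defaults, overrides map[string]any) map[string]any {
    out := make(map[string]any, len(defaults))
    for k, v := range defaults {
        out[k] = v
    }
    for k, v := range overrides {
        if sub, ok := v.(map[string]any); ok {
            if base, ok := out[k].(map[string]any); ok {
                out[k] = mergeValues(base, sub)
                continue
            }
        }
        out[k] = v
    }
    return out
}

func main() {
    defaults := map[string]any{"replicaCount": 2, "clusterName": nil}
    overrides := map[string]any{
        "clusterName":  "constell-test", // hypothetical cluster name
        "nodeSelector": map[string]any{"node-role.kubernetes.io/control-plane": ""},
    }
    fmt.Println(mergeValues(defaults, overrides))
}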
@ -23,6 +23,7 @@ spec:
            - --leader-elect=true
            - --allocate-node-cidrs=false
            - --configure-cloud-routes=false
            - --controllers=*,-service
            - -v=2
          volumeMounts:
            - name: etckubernetes
@ -13,6 +13,7 @@ import (
    "strings"
    "time"

    "github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
    "github.com/edgelesssys/constellation/v2/internal/compatibility"
    "github.com/edgelesssys/constellation/v2/internal/config"
    "github.com/edgelesssys/constellation/v2/internal/constants"
@ -104,12 +105,12 @@ func (c *Client) shouldUpgrade(releaseName, newVersion string, force bool) error
// Upgrade runs a helm-upgrade on all deployments that are managed via Helm.
// If the CLI receives an interrupt signal it will cancel the context.
// Canceling the context will prompt helm to abort and roll back the ongoing upgrade.
func (c *Client) Upgrade(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive, force bool, upgradeID string) error {
func (c *Client) Upgrade(ctx context.Context, config *config.Config, idFile clusterid.File, timeout time.Duration, allowDestructive, force bool, upgradeID string) error {
    upgradeErrs := []error{}
    upgradeReleases := []*chart.Chart{}
    newReleases := []*chart.Chart{}

    for _, info := range []chartInfo{ciliumInfo, certManagerInfo, constellationOperatorsInfo, constellationServicesInfo, csiInfo} {
    for _, info := range []chartInfo{ciliumInfo, certManagerInfo, constellationOperatorsInfo, constellationServicesInfo, csiInfo, awsLBControllerInfo} {
        c.log.Debugf("Checking release %s", info.releaseName)
        chart, err := loadChartsDir(helmFS, info.path)
        if err != nil {
@ -165,7 +166,7 @@ func (c *Client) Upgrade(ctx context.Context, config *config.Config, timeout tim

    for _, chart := range upgradeReleases {
        c.log.Debugf("Upgrading release %s", chart.Metadata.Name)
        if err := c.upgradeRelease(ctx, timeout, config, chart); err != nil {
        if err := c.upgradeRelease(ctx, timeout, config, idFile, chart); err != nil {
            return fmt.Errorf("upgrading %s: %w", chart.Metadata.Name, err)
        }
    }
@ -178,7 +179,7 @@ func (c *Client) Upgrade(ctx context.Context, config *config.Config, timeout tim
    // it should be done in a separate loop, instead of moving this one up.
    for _, chart := range newReleases {
        c.log.Debugf("Installing new release %s", chart.Metadata.Name)
        if err := c.installNewRelease(ctx, timeout, config, chart); err != nil {
        if err := c.installNewRelease(ctx, timeout, config, idFile, chart); err != nil {
            return fmt.Errorf("upgrading %s: %w", chart.Metadata.Name, err)
        }
    }
@ -204,12 +205,17 @@ func (c *Client) Versions() (ServiceVersions, error) {
    if err != nil {
        return ServiceVersions{}, fmt.Errorf("getting %s version: %w", constellationServicesInfo.releaseName, err)
    }
    awsLBVersion, err := c.currentVersion(awsLBControllerInfo.releaseName)
    if err != nil && !errors.Is(err, errReleaseNotFound) {
        return ServiceVersions{}, fmt.Errorf("getting %s version: %w", awsLBControllerInfo.releaseName, err)
    }

    return ServiceVersions{
        cilium:                 compatibility.EnsurePrefixV(ciliumVersion),
        certManager:            compatibility.EnsurePrefixV(certManagerVersion),
        constellationOperators: compatibility.EnsurePrefixV(operatorsVersion),
        constellationServices:  compatibility.EnsurePrefixV(servicesVersion),
        awsLBController:        compatibility.EnsurePrefixV(awsLBVersion),
    }, nil
}
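The version query above must tolerate a cluster on which the LB controller chart is not installed yet (clusters created before this change), which is why only errors other than errReleaseNotFound abort it. A standalone sketch of that pattern, with illustrative names standing in for the client internals:

package main

import (
    "errors"
    "fmt"
)

// errReleaseNotFound mirrors the sentinel used above; currentVersion is a stand-in
// for the Helm release lookup.
var errReleaseNotFound = errors.New("release not found")

func currentVersion(release string, installed map[string]string) (string, error) {
    v, ok := installed[release]
    if !ok {
        return "", errReleaseNotFound
    }
    return v, nil
}

func main() {
    installed := map[string]string{"cilium": "1.12.1"}
    v, err := currentVersion("aws-load-balancer-controller", installed)
    if err != nil && !errors.Is(err, errReleaseNotFound) {
        panic(err) // only unexpected errors are fatal
    }
    fmt.Printf("aws LB controller version: %q (empty means not installed yet)\n", v)
}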
@ -240,6 +246,7 @@ type ServiceVersions struct {
    certManager            string
    constellationOperators string
    constellationServices  string
    awsLBController        string
}

// NewServiceVersions returns a new ServiceVersions struct.
@ -274,9 +281,9 @@ func (s ServiceVersions) ConstellationServices() string {

// installNewRelease installs a previously not installed release on the cluster.
func (c *Client) installNewRelease(
    ctx context.Context, timeout time.Duration, conf *config.Config, chart *chart.Chart,
    ctx context.Context, timeout time.Duration, conf *config.Config, idFile clusterid.File, chart *chart.Chart,
) error {
    releaseName, values, err := c.loadUpgradeValues(ctx, conf, chart)
    releaseName, values, err := c.loadUpgradeValues(ctx, conf, idFile, chart)
    if err != nil {
        return fmt.Errorf("loading values: %w", err)
    }
@ -285,9 +292,9 @@ func (c *Client) installNewRelease(

// upgradeRelease upgrades a release running on the cluster.
func (c *Client) upgradeRelease(
    ctx context.Context, timeout time.Duration, conf *config.Config, chart *chart.Chart,
    ctx context.Context, timeout time.Duration, conf *config.Config, idFile clusterid.File, chart *chart.Chart,
) error {
    releaseName, values, err := c.loadUpgradeValues(ctx, conf, chart)
    releaseName, values, err := c.loadUpgradeValues(ctx, conf, idFile, chart)
    if err != nil {
        return fmt.Errorf("loading values: %w", err)
    }
@ -301,7 +308,7 @@ func (c *Client) upgradeRelease(
}

// loadUpgradeValues loads values for a chart required for running an upgrade.
func (c *Client) loadUpgradeValues(ctx context.Context, conf *config.Config, chart *chart.Chart,
func (c *Client) loadUpgradeValues(ctx context.Context, conf *config.Config, idFile clusterid.File, chart *chart.Chart,
) (string, map[string]any, error) {
    // We need to load all values that can be statically loaded before merging them with the cluster
    // values. Otherwise the templates are not rendered correctly.
@ -309,7 +316,11 @@ func (c *Client) loadUpgradeValues(ctx context.Context, conf *config.Config, cha
    if err != nil {
        return "", nil, fmt.Errorf("validating k8s version: %s", conf.KubernetesVersion)
    }
    loader := NewLoader(conf.GetProvider(), k8sVersion)

    c.log.Debugf("Checking cluster ID file")
    clusterName := clusterid.GetClusterName(conf, idFile)

    loader := NewLoader(conf.GetProvider(), k8sVersion, clusterName)

    var values map[string]any
    var releaseName string
@ -342,6 +353,9 @@ func (c *Client) loadUpgradeValues(ctx context.Context, conf *config.Config, cha
    case csiInfo.chartName:
        releaseName = csiInfo.releaseName
        values = loader.loadCSIValues()
    case awsLBControllerInfo.chartName:
        releaseName = awsLBControllerInfo.releaseName
        values = loader.loadAWSLBControllerValues()
    default:
        return "", nil, fmt.Errorf("unknown chart name: %s", chart.Metadata.Name)
    }
@ -11,6 +11,7 @@ import (
    "testing"
    "time"

    "github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
    "github.com/edgelesssys/constellation/v2/internal/compatibility"
    "github.com/edgelesssys/constellation/v2/internal/config"
    "github.com/edgelesssys/constellation/v2/internal/logger"
@ -77,7 +78,7 @@ func TestUpgradeRelease(t *testing.T) {

    chart, err := loadChartsDir(helmFS, certManagerInfo.path)
    require.NoError(err)
    err = client.upgradeRelease(context.Background(), 0, config.Default(), chart)
    err = client.upgradeRelease(context.Background(), 0, config.Default(), clusterid.File{UID: "test"}, chart)
    if tc.wantError {
        assert.Error(err)
        return
@ -36,6 +36,7 @@ import (
//go:generate ./generateCilium.sh
//go:generate ./update-csi-charts.sh
//go:generate ./generateCertManager.sh
//go:generate ./update-aws-load-balancer-chart.sh

//go:embed all:charts/*
var helmFS embed.FS
@ -51,6 +52,7 @@ var (
    certManagerInfo            = chartInfo{releaseName: "cert-manager", chartName: "cert-manager", path: "charts/cert-manager"}
    constellationOperatorsInfo = chartInfo{releaseName: "constellation-operators", chartName: "constellation-operators", path: "charts/edgeless/operators"}
    constellationServicesInfo  = chartInfo{releaseName: "constellation-services", chartName: "constellation-services", path: "charts/edgeless/constellation-services"}
    awsLBControllerInfo        = chartInfo{releaseName: "aws-load-balancer-controller", chartName: "aws-load-balancer-controller", path: "charts/aws-load-balancer-controller"}
    csiInfo                    = chartInfo{releaseName: "constellation-csi", chartName: "constellation-csi", path: "charts/edgeless/csi"}
)

@ -59,18 +61,19 @@ type ChartLoader struct {
    csp              cloudprovider.Provider
    joinServiceImage string
    keyServiceImage  string
    ccmImage         string
    cnmImage         string
    ccmImage                     string // cloud controller manager image
    azureCNMImage                string // Azure cloud node manager image
    autoscalerImage              string
    verificationServiceImage     string
    gcpGuestAgentImage           string
    konnectivityImage            string
    constellationOperatorImage   string
    nodeMaintenanceOperatorImage string
    clusterName                  string
}

// NewLoader creates a new ChartLoader.
func NewLoader(csp cloudprovider.Provider, k8sVersion versions.ValidK8sVersion) *ChartLoader {
func NewLoader(csp cloudprovider.Provider, k8sVersion versions.ValidK8sVersion, clusterName string) *ChartLoader {
    var ccmImage, cnmImage string
    switch csp {
    case cloudprovider.AWS:
@ -91,13 +94,14 @@ func NewLoader(csp cloudprovider.Provider, k8sVersion versions.ValidK8sVersion)
        joinServiceImage: imageversion.JoinService("", ""),
        keyServiceImage:  imageversion.KeyService("", ""),
        ccmImage:         ccmImage,
        cnmImage:         cnmImage,
        azureCNMImage:    cnmImage,
        autoscalerImage:              versions.VersionConfigs[k8sVersion].ClusterAutoscalerImage,
        verificationServiceImage:     imageversion.VerificationService("", ""),
        gcpGuestAgentImage:           versions.GcpGuestImage,
        konnectivityImage:            versions.KonnectivityAgentImage,
        constellationOperatorImage:   imageversion.ConstellationNodeOperator("", ""),
        nodeMaintenanceOperatorImage: versions.NodeMaintenanceOperatorImage,
        clusterName:                  clusterName,
    }
}

@ -127,7 +131,14 @@ func (i *ChartLoader) Load(config *config.Config, conformanceMode bool, helmWait
        return nil, fmt.Errorf("extending constellation-services values: %w", err)
    }

    releases := helm.Releases{Cilium: ciliumRelease, CertManager: certManagerRelease, Operators: operatorRelease, ConstellationServices: conServicesRelease}
    releases := helm.Releases{Cilium: ciliumRelease, CertManager: certManagerRelease, ConstellationOperators: operatorRelease, ConstellationServices: conServicesRelease}
    if config.HasProvider(cloudprovider.AWS) {
        awsRelease, err := i.loadRelease(awsLBControllerInfo, helmWaitMode)
        if err != nil {
            return nil, fmt.Errorf("loading aws-services: %w", err)
        }
        releases.AWSLoadBalancerController = &awsRelease
    }

    if config.DeployCSIDriver() {
        csi, err := i.loadRelease(csiInfo, helmWaitMode)
@ -145,6 +156,7 @@ func (i *ChartLoader) Load(config *config.Config, conformanceMode bool, helmWait
}

// loadRelease loads the embedded chart and values depending on the given info argument.
// IMPORTANT: .helmignore rules specifying files in subdirectories are not applied (e.g. crds/kustomization.yaml).
func (i *ChartLoader) loadRelease(info chartInfo, helmWaitMode helm.WaitMode) (helm.Release, error) {
    chart, err := loadChartsDir(helmFS, info.path)
    if err != nil {
@ -168,6 +180,8 @@ func (i *ChartLoader) loadRelease(info chartInfo, helmWaitMode helm.WaitMode) (h
    case constellationServicesInfo.releaseName:
        updateVersions(chart, compatibility.EnsurePrefixV(constants.VersionInfo()))
        values = i.loadConstellationServicesValues()
    case awsLBControllerInfo.releaseName:
        values = i.loadAWSLBControllerValues()
    case csiInfo.releaseName:
        updateVersions(chart, compatibility.EnsurePrefixV(constants.VersionInfo()))
        values = i.loadCSIValues()
@ -181,6 +195,14 @@ func (i *ChartLoader) loadRelease(info chartInfo, helmWaitMode helm.WaitMode) (h
    return helm.Release{Chart: chartRaw, Values: values, ReleaseName: info.releaseName, WaitMode: helmWaitMode}, nil
}

func (i *ChartLoader) loadAWSLBControllerValues() map[string]any {
    return map[string]any{
        "clusterName":  i.clusterName,
        "tolerations":  controlPlaneTolerations,
        "nodeSelector": controlPlaneNodeSelector,
    }
}

// extendCiliumValues extends the given values map by some values depending on user input.
// This extra step of separating the application of user input is necessary since service upgrades should
// reuse user input from the init step. However, we can't rely on reuse-values, because
@ -271,7 +293,7 @@ func (i *ChartLoader) loadConstellationServicesValues() map[string]any {
            "image": i.ccmImage,
        },
        "cnm": map[string]any{
            "image": i.cnmImage,
            "image": i.azureCNMImage,
        },
        "autoscaler": map[string]any{
            "csp": i.csp.String(),
@ -400,6 +422,7 @@ func (i *ChartLoader) marshalChart(chart *chart.Chart) ([]byte, error) {
// loadChartsDir loads from a directory.
//
// This loads charts only from directories.
// IMPORTANT: .helmignore rules specifying files in subdirectories are not applied (e.g. crds/kustomization.yaml).
func loadChartsDir(efs embed.FS, dir string) (*chart.Chart, error) {
    utf8bom := []byte{0xEF, 0xBB, 0xBF}
    // Just used for errors.
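The repeated IMPORTANT note exists because the charts are loaded from an embedded filesystem, and .helmignore rules naming files in subdirectories (such as crds/kustomization.yaml) are not applied there; that is why update-aws-load-balancer-chart.sh deletes those files before they are embedded. A small self-contained sketch of the effect, using a fake filesystem rather than the real chart:

package main

import (
    "fmt"
    "io/fs"
    "testing/fstest"
)

// Illustrates the .helmignore limitation noted above (a sketch, not the Helm
// loader itself): a file named by a subdirectory ignore rule still ends up in
// the embedded FS, so it must be removed before embedding.
func main() {
    chartFS := fstest.MapFS{
        "Chart.yaml":              {Data: []byte("name: example")},
        "crds/kustomization.yaml": {Data: []byte("resources: []")}, // listed in .helmignore, yet still present
    }
    _ = fs.WalkDir(chartFS, ".", func(path string, d fs.DirEntry, err error) error {
        if err == nil && !d.IsDir() {
            fmt.Println("embedded:", path) // both files are printed
        }
        return nil
    })
}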
@ -51,6 +51,17 @@ func TestLoad(t *testing.T) {
    assert.NotNil(chart.Dependencies())
}

func TestLoadAWSLoadBalancerValues(t *testing.T) {
    sut := ChartLoader{
        clusterName: "testCluster",
    }
    val := sut.loadAWSLBControllerValues()
    assert.Equal(t, "testCluster", val["clusterName"])
    // needs to run on control-plane
    assert.Contains(t, val["nodeSelector"].(map[string]any), "node-role.kubernetes.io/control-plane")
    assert.Contains(t, val["tolerations"].([]map[string]any), map[string]any{"key": "node-role.kubernetes.io/control-plane", "operator": "Exists", "effect": "NoSchedule"})
}

// TestConstellationServices checks if the rendered constellation-services chart produces the expected yaml files.
func TestConstellationServices(t *testing.T) {
    testCases := map[string]struct {
@ -132,11 +143,12 @@ func TestConstellationServices(t *testing.T) {
                joinServiceImage:         "joinServiceImage",
                keyServiceImage:          "keyServiceImage",
                ccmImage:                 tc.ccmImage,
                cnmImage:                 tc.cnmImage,
                azureCNMImage:            tc.cnmImage,
                autoscalerImage:          "autoscalerImage",
                verificationServiceImage: "verificationImage",
                konnectivityImage:        "konnectivityImage",
                gcpGuestAgentImage:       "gcpGuestAgentImage",
                clusterName:              "testCluster",
            }
            chart, err := loadChartsDir(helmFS, constellationServicesInfo.path)
            require.NoError(err)
@ -214,7 +226,7 @@ func TestOperators(t *testing.T) {
                joinServiceImage:             "joinServiceImage",
                keyServiceImage:              "keyServiceImage",
                ccmImage:                     "ccmImage",
                cnmImage:                     "cnmImage",
                azureCNMImage:                "cnmImage",
                autoscalerImage:              "autoscalerImage",
                constellationOperatorImage:   "constellationOperatorImage",
                nodeMaintenanceOperatorImage: "nodeMaintenanceOperatorImage",
@ -22,6 +22,7 @@ spec:
            - --leader-elect=true
            - --allocate-node-cidrs=false
            - --configure-cloud-routes=false
            - --controllers=*,-service
            - -v=2
          volumeMounts:
            - mountPath: /etc/kubernetes
50
cli/internal/helm/update-aws-load-balancer-chart.sh
Executable file
@ -0,0 +1,50 @@
#!/usr/bin/env bash

# Updates the Helm chart for the AWS Load Balancer Controller in the CLI.
# The script is mostly copied from cli/internal/helm/update-csi-charts.sh.

set -euo pipefail
set -o errtrace
shopt -s inherit_errexit

echo "Updating AWS Load Balancer Controller Helm chart..."
branch="v0.0.140" # release tag of aws/eks-charts; bump to pull a newer aws-load-balancer-controller chart
# Required tools
if ! command -v git &> /dev/null; then
  echo "git could not be found"
  exit 1
fi

callDir=$(pwd)
repo_tmp_dir=$(mktemp -d)

chart_base_path="charts"
chart_name="aws-load-balancer-controller"

chart_url="https://github.com/aws/eks-charts"
chart_dir="stable/aws-load-balancer-controller"
cd "${repo_tmp_dir}"
git clone \
  --filter=blob:none \
  --no-checkout \
  --sparse \
  --depth 1 \
  --branch="${branch}" \
  "${chart_url}" "${repo_tmp_dir}"

git sparse-checkout add "${chart_dir}"
git checkout
cd "${callDir}"

# remove files that .helmignore should exclude; the current loader implementation does not apply these rules
rm -r "${repo_tmp_dir}/${chart_dir}/ci"
rm "${repo_tmp_dir}/${chart_dir}/crds/kustomization.yaml"
rm "${repo_tmp_dir}/${chart_dir}/test.yaml"

# delete current chart
rm -r "${chart_base_path:?}/${chart_name}"

# move new chart
mkdir -p "${chart_base_path}/${chart_name}"
# do not use /* because it will not copy hidden files
cp -r "${repo_tmp_dir}/${chart_dir}" "${chart_base_path}/"
@ -194,6 +194,8 @@ var ciliumVals = map[string]map[string]any{
    },
}

var controlPlaneNodeSelector = map[string]any{"node-role.kubernetes.io/control-plane": ""}

var controlPlaneTolerations = []map[string]any{
    {
        "key": "node-role.kubernetes.io/control-plane",
@ -11,6 +11,7 @@
    importpath = "github.com/edgelesssys/constellation/v2/cli/internal/kubernetes",
    visibility = ["//cli:__subpackages__"],
    deps = [
        "//cli/internal/clusterid",
        "//cli/internal/helm",
        "//cli/internal/terraform",
        "//cli/internal/upgrade",
@ -17,6 +17,7 @@ import (
    "strings"
    "time"

    "github.com/edgelesssys/constellation/v2/cli/internal/clusterid"
    "github.com/edgelesssys/constellation/v2/cli/internal/helm"
    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/edgelesssys/constellation/v2/cli/internal/upgrade"
@ -155,6 +156,11 @@ func NewUpgrader(ctx context.Context, outWriter io.Writer, fileHandler file.Hand
    return u, nil
}

// GetUpgradeID returns the upgrade ID.
func (u *Upgrader) GetUpgradeID() string {
    return u.upgradeID
}

// AddManualStateMigration adds a manual state migration to the Terraform client.
// TODO(AB#3248): Remove this method after we can assume that all existing clusters have been migrated.
func (u *Upgrader) AddManualStateMigration(migration terraform.StateMigration) {
@ -164,7 +170,7 @@ func (u *Upgrader) AddManualStateMigration(migration terraform.StateMigration) {
// CheckTerraformMigrations checks whether Terraform migrations are possible in the current workspace.
// If the files that will be written during the upgrade already exist, it returns an error.
func (u *Upgrader) CheckTerraformMigrations() error {
    return u.tfUpgrader.CheckTerraformMigrations(u.upgradeID)
    return u.tfUpgrader.CheckTerraformMigrations(u.upgradeID, constants.TerraformUpgradeBackupDir)
}

// CleanUpTerraformMigrations cleans up the Terraform migration workspace, for example when an upgrade is
@ -180,7 +186,7 @@ func (u *Upgrader) PlanTerraformMigrations(ctx context.Context, opts upgrade.Ter
    return u.tfUpgrader.PlanTerraformMigrations(ctx, opts, u.upgradeID)
}

// ApplyTerraformMigrations applies the migerations planned by PlanTerraformMigrations.
// ApplyTerraformMigrations applies the migrations planned by PlanTerraformMigrations.
// If PlanTerraformMigrations has not been executed before, it will return an error.
// In case of a successful upgrade, the output will be written to the specified file and the old Terraform directory is replaced
// by the new one.
@ -189,8 +195,8 @@ func (u *Upgrader) ApplyTerraformMigrations(ctx context.Context, opts upgrade.Te
}

// UpgradeHelmServices upgrades Helm services.
func (u *Upgrader) UpgradeHelmServices(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive bool, force bool) error {
    return u.helmClient.Upgrade(ctx, config, timeout, allowDestructive, force, u.upgradeID)
func (u *Upgrader) UpgradeHelmServices(ctx context.Context, config *config.Config, idFile clusterid.File, timeout time.Duration, allowDestructive bool, force bool) error {
    return u.helmClient.Upgrade(ctx, config, idFile, timeout, allowDestructive, force, u.upgradeID)
}

// UpgradeNodeVersion upgrades the cluster's NodeVersion object and in turn triggers image & k8s version upgrades.
@ -578,7 +584,7 @@ func (u *stableClient) KubernetesVersion() (string, error) {
}

type helmInterface interface {
    Upgrade(ctx context.Context, config *config.Config, timeout time.Duration, allowDestructive, force bool, upgradeID string) error
    Upgrade(ctx context.Context, config *config.Config, idFile clusterid.File, timeout time.Duration, allowDestructive, force bool, upgradeID string) error
}

type debugLog interface {
@ -102,6 +102,18 @@ func (c *Client) PrepareUpgradeWorkspace(path, oldWorkingDir, newWorkingDir, bac
    return c.writeVars(vars)
}

// PrepareIAMUpgradeWorkspace prepares a Terraform workspace for a Constellation IAM upgrade.
func PrepareIAMUpgradeWorkspace(file file.Handler, path, oldWorkingDir, newWorkingDir, backupDir string) error {
    if err := prepareUpgradeWorkspace(path, file, oldWorkingDir, newWorkingDir, backupDir); err != nil {
        return fmt.Errorf("prepare upgrade workspace: %w", err)
    }
    // copy the vars file from the old working dir to the new working dir
    if err := file.CopyFile(filepath.Join(oldWorkingDir, terraformVarsFile), filepath.Join(newWorkingDir, terraformVarsFile)); err != nil {
        return fmt.Errorf("copying vars file: %w", err)
    }
    return nil
}

// CreateCluster creates a Constellation cluster using Terraform.
func (c *Client) CreateCluster(ctx context.Context, logLevel LogLevel) (ApplyOutput, error) {
    if err := c.setLogLevel(logLevel); err != nil {
@ -218,8 +230,8 @@ type AWSIAMOutput struct {
    WorkerNodeInstanceProfile string
}

// CreateIAMConfig creates an IAM configuration using Terraform.
func (c *Client) CreateIAMConfig(ctx context.Context, provider cloudprovider.Provider, logLevel LogLevel) (IAMOutput, error) {
// ApplyIAMConfig creates an IAM configuration using Terraform.
func (c *Client) ApplyIAMConfig(ctx context.Context, provider cloudprovider.Provider, logLevel LogLevel) (IAMOutput, error) {
    if err := c.setLogLevel(logLevel); err != nil {
        return IAMOutput{}, fmt.Errorf("set terraform log level %s: %w", logLevel.String(), err)
    }
@ -52,7 +52,7 @@ resource "aws_subnet" "private" {
|
||||
vpc_id = var.vpc_id
|
||||
cidr_block = cidrsubnet(var.cidr_vpc_subnet_nodes, 4, local.az_number[each.value.name_suffix])
|
||||
availability_zone = each.key
|
||||
tags = merge(var.tags, { Name = "${var.name}-subnet-nodes" })
|
||||
tags = merge(var.tags, { Name = "${var.name}-subnet-nodes" }, { "kubernetes.io/role/internal-elb" = 1 }) # aws-load-balancer-controller needs role annotation
|
||||
lifecycle {
|
||||
ignore_changes = [
|
||||
cidr_block, # required. Legacy subnets used fixed cidr blocks for the single zone that don't match the new scheme.
|
||||
@ -65,7 +65,7 @@ resource "aws_subnet" "public" {
|
||||
vpc_id = var.vpc_id
|
||||
cidr_block = cidrsubnet(var.cidr_vpc_subnet_internet, 4, local.az_number[each.value.name_suffix])
|
||||
availability_zone = each.key
|
||||
tags = merge(var.tags, { Name = "${var.name}-subnet-internet" })
|
||||
tags = merge(var.tags, { Name = "${var.name}-subnet-internet" }, { "kubernetes.io/role/elb" = 1 }) # aws-load-balancer-controller needs role annotation
|
||||
lifecycle {
|
||||
ignore_changes = [
|
||||
cidr_block, # required. Legacy subnets used fixed cidr blocks for the single zone that don't match the new scheme.
|
||||
|
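These role tags drive the controller's subnet auto-discovery: subnets tagged kubernetes.io/role/elb are candidates for internet-facing load balancers, and kubernetes.io/role/internal-elb marks subnets for internal ones. A standalone sketch of that selection rule (illustrative, not the controller's actual code):

package main

import "fmt"

// eligibleSubnet mimics the documented discovery rule: a subnet qualifies for
// the requested load balancer scheme when it carries the matching role tag.
func eligibleSubnet(tags map[string]string, internal bool) bool {
    key := "kubernetes.io/role/elb"
    if internal {
        key = "kubernetes.io/role/internal-elb"
    }
    _, ok := tags[key]
    return ok
}

func main() {
    publicSubnet := map[string]string{"kubernetes.io/role/elb": "1"}
    fmt.Println(eligibleSubnet(publicSubnet, false)) // true: usable for internet-facing LBs
    fmt.Println(eligibleSubnet(publicSubnet, true))  // false: not tagged for internal LBs
}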
@ -51,6 +51,12 @@ resource "aws_iam_policy" "control_plane_policy" {
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"elasticloadbalancing:DescribeTargetGroupAttributes",
|
||||
"elasticloadbalancing:DescribeRules",
|
||||
"shield:GetSubscriptionState",
|
||||
"elasticloadbalancing:DescribeListeners",
|
||||
"elasticloadbalancing:ModifyTargetGroupAttributes",
|
||||
"elasticloadbalancing:DescribeTags",
|
||||
"autoscaling:DescribeAutoScalingGroups",
|
||||
"autoscaling:DescribeLaunchConfigurations",
|
||||
"autoscaling:DescribeTags",
|
||||
|
@ -10,8 +10,7 @@ terraform {
locals {
  tags      = distinct(sort(concat(var.tags, ["constellation-role-${var.role}"], ["constellation-node-group-${var.node_group_name}"])))
  group_uid = random_id.uid.hex
  #name = "${var.base_name}-${var.role}" // TODO keep old naming ?
  name = "${var.base_name}-${var.role}-${local.group_uid}"
  name      = "${var.base_name}-${var.role}-${local.group_uid}"
}

resource "random_id" "uid" {
@ -727,7 +727,7 @@ func TestCreateIAM(t *testing.T) {

    path := path.Join(tc.pathBase, strings.ToLower(tc.provider.String()))
    require.NoError(c.PrepareWorkspace(path, tc.vars))
    IAMoutput, err := c.CreateIAMConfig(context.Background(), tc.provider, LogLevelDebug)
    IAMoutput, err := c.ApplyIAMConfig(context.Background(), tc.provider, LogLevelDebug)

    if tc.wantErr {
        assert.Error(err)
@ -4,6 +4,7 @@ load("//bazel/go:go_test.bzl", "go_test")
|
||||
go_library(
|
||||
name = "upgrade",
|
||||
srcs = [
|
||||
"iammigrate.go",
|
||||
"terraform.go",
|
||||
"upgrade.go",
|
||||
],
|
||||
@ -21,7 +22,10 @@ go_library(
|
||||
|
||||
go_test(
|
||||
name = "upgrade_test",
|
||||
srcs = ["terraform_test.go"],
|
||||
srcs = [
|
||||
"iammigrate_test.go",
|
||||
"terraform_test.go",
|
||||
],
|
||||
embed = [":upgrade"],
|
||||
deps = [
|
||||
"//cli/internal/terraform",
|
||||
|
112
cli/internal/upgrade/iammigrate.go
Normal file
@ -0,0 +1,112 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package upgrade

import (
    "context"
    "fmt"
    "io"
    "path/filepath"
    "strings"

    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
    "github.com/edgelesssys/constellation/v2/internal/constants"
    "github.com/edgelesssys/constellation/v2/internal/file"
)

// TfMigrationCmd is an interface for all terraform upgrade / migration commands.
type TfMigrationCmd interface {
    CheckTerraformMigrations(file file.Handler) error
    Plan(ctx context.Context, file file.Handler, outWriter io.Writer) (bool, error)
    Apply(ctx context.Context, fileHandler file.Handler) error
    String() string
    UpgradeID() string
}

// IAMMigrateCmd is a terraform migration command for IAM.
type IAMMigrateCmd struct {
    tf        tfIAMClient
    upgradeID string
    csp       cloudprovider.Provider
    logLevel  terraform.LogLevel
}

// NewIAMMigrateCmd creates a new IAMMigrateCmd.
func NewIAMMigrateCmd(ctx context.Context, upgradeID string, csp cloudprovider.Provider, logLevel terraform.LogLevel) (*IAMMigrateCmd, error) {
    tfClient, err := terraform.New(ctx, filepath.Join(constants.UpgradeDir, upgradeID, constants.TerraformIAMUpgradeWorkingDir))
    if err != nil {
        return nil, fmt.Errorf("setting up terraform client: %w", err)
    }
    return &IAMMigrateCmd{
        tf:        tfClient,
        upgradeID: upgradeID,
        csp:       csp,
        logLevel:  logLevel,
    }, nil
}

// String returns the name of the command.
func (c *IAMMigrateCmd) String() string {
    return "iam migration"
}

// UpgradeID returns the upgrade ID.
func (c *IAMMigrateCmd) UpgradeID() string {
    return c.upgradeID
}

// CheckTerraformMigrations checks whether Terraform migrations are possible in the current workspace.
func (c *IAMMigrateCmd) CheckTerraformMigrations(file file.Handler) error {
    return checkTerraformMigrations(file, c.upgradeID, constants.TerraformIAMUpgradeBackupDir)
}

// Plan prepares the upgrade workspace and plans the Terraform migrations for the Constellation upgrade, writing the plan to the outWriter.
func (c *IAMMigrateCmd) Plan(ctx context.Context, file file.Handler, outWriter io.Writer) (bool, error) {
    templateDir := filepath.Join("terraform", "iam", strings.ToLower(c.csp.String()))
    if err := terraform.PrepareIAMUpgradeWorkspace(file,
        templateDir,
        constants.TerraformIAMWorkingDir,
        filepath.Join(constants.UpgradeDir, c.upgradeID, constants.TerraformIAMUpgradeWorkingDir),
        filepath.Join(constants.UpgradeDir, c.upgradeID, constants.TerraformIAMUpgradeBackupDir),
    ); err != nil {
        return false, fmt.Errorf("preparing terraform workspace: %w", err)
    }

    hasDiff, err := c.tf.Plan(ctx, c.logLevel, constants.TerraformUpgradePlanFile)
    if err != nil {
        return false, fmt.Errorf("terraform plan: %w", err)
    }

    if hasDiff {
        if err := c.tf.ShowPlan(ctx, c.logLevel, constants.TerraformUpgradePlanFile, outWriter); err != nil {
            return false, fmt.Errorf("terraform show plan: %w", err)
        }
    }

    return hasDiff, nil
}

// Apply applies the Terraform IAM migrations for the Constellation upgrade.
func (c *IAMMigrateCmd) Apply(ctx context.Context, fileHandler file.Handler) error {
    if _, err := c.tf.ApplyIAMConfig(ctx, c.csp, c.logLevel); err != nil {
        return fmt.Errorf("terraform apply: %w", err)
    }

    if err := fileHandler.RemoveAll(constants.TerraformIAMWorkingDir); err != nil {
        return fmt.Errorf("removing old terraform directory: %w", err)
    }
    if err := fileHandler.CopyDir(filepath.Join(constants.UpgradeDir, c.upgradeID, constants.TerraformIAMUpgradeWorkingDir), constants.TerraformIAMWorkingDir); err != nil {
        return fmt.Errorf("replacing old terraform directory with new one: %w", err)
    }

    if err := fileHandler.RemoveAll(filepath.Join(constants.UpgradeDir, c.upgradeID, constants.TerraformIAMUpgradeWorkingDir)); err != nil {
        return fmt.Errorf("removing terraform upgrade directory: %w", err)
    }

    return nil
}
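Plan and Apply are intentionally separate so a caller can show the pending diff first and skip the apply entirely when there is nothing to migrate. A standalone sketch of such a driver follows; the interface is narrowed from TfMigrationCmd above, and file.Handler is left out so the example compiles on its own.

package main

import (
    "context"
    "fmt"
    "io"
    "os"
)

// migrationCmd is a simplified stand-in for TfMigrationCmd.
type migrationCmd interface {
    Plan(ctx context.Context, outWriter io.Writer) (bool, error)
    Apply(ctx context.Context) error
    String() string
}

// noopMigration is a trivial command with no pending changes.
type noopMigration struct{}

func (noopMigration) Plan(context.Context, io.Writer) (bool, error) { return false, nil }
func (noopMigration) Apply(context.Context) error                   { return nil }
func (noopMigration) String() string                                { return "noop migration" }

// runMigration plans first and only applies when the plan reported a diff.
func runMigration(ctx context.Context, cmd migrationCmd) error {
    hasDiff, err := cmd.Plan(ctx, os.Stdout)
    if err != nil {
        return fmt.Errorf("planning %s: %w", cmd.String(), err)
    }
    if !hasDiff {
        return nil // nothing to migrate; skip apply
    }
    return cmd.Apply(ctx)
}

func main() {
    if err := runMigration(context.Background(), noopMigration{}); err != nil {
        fmt.Println(err)
    }
}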
112
cli/internal/upgrade/iammigrate_test.go
Normal file
@ -0,0 +1,112 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package upgrade

import (
    "bytes"
    "context"
    "io"
    "path/filepath"
    "testing"

    "github.com/edgelesssys/constellation/v2/cli/internal/terraform"
    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
    "github.com/edgelesssys/constellation/v2/internal/constants"
    "github.com/edgelesssys/constellation/v2/internal/file"
    "github.com/spf13/afero"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestIAMMigrate(t *testing.T) {
    upgradeID := "test-upgrade"
    upgradeDir := filepath.Join(constants.UpgradeDir, upgradeID, constants.TerraformIAMUpgradeWorkingDir)
    fs, file := setupMemFSAndFileHandler(t, []string{"terraform.tfvars", "terraform.tfstate"}, []byte("OLD"))
    // act
    fakeTfClient := &tfClientStub{upgradeID, file}
    sut := &IAMMigrateCmd{fakeTfClient, upgradeID, cloudprovider.AWS, terraform.LogLevelDebug}
    hasDiff, err := sut.Plan(context.Background(), file, bytes.NewBuffer(nil))
    // assert
    assert.NoError(t, err)
    assert.False(t, hasDiff)
    assertFileExists(fs, filepath.Join(upgradeDir, "terraform.tfvars"), t)
    assertFileExists(fs, filepath.Join(upgradeDir, "terraform.tfstate"), t)

    // act
    err = sut.Apply(context.Background(), file)
    assert.NoError(t, err)
    // assert
    assertFileReadsContent(file, filepath.Join(constants.TerraformIAMWorkingDir, "terraform.tfvars"), "NEW", t)
    assertFileReadsContent(file, filepath.Join(constants.TerraformIAMWorkingDir, "terraform.tfstate"), "NEW", t)
    assertFileDoesntExist(fs, filepath.Join(upgradeDir), t)
}

func assertFileReadsContent(file file.Handler, path string, expectedContent string, t *testing.T) {
    bt, err := file.Read(path)
    assert.NoError(t, err)
    assert.Equal(t, expectedContent, string(bt))
}

func assertFileExists(fs afero.Fs, path string, t *testing.T) {
    res, err := fs.Stat(path)
    assert.NoError(t, err)
    assert.NotNil(t, res)
}

func assertFileDoesntExist(fs afero.Fs, path string, t *testing.T) {
    res, err := fs.Stat(path)
    assert.Error(t, err)
    assert.Nil(t, res)
}

// setupMemFSAndFileHandler sets up a file handler with a memory file system and writes the given files with the given content.
func setupMemFSAndFileHandler(t *testing.T, files []string, content []byte) (afero.Fs, file.Handler) {
    fs := afero.NewMemMapFs()
    file := file.NewHandler(fs)
    err := file.MkdirAll(constants.TerraformIAMWorkingDir)
    require.NoError(t, err)

    for _, f := range files {
        err := file.Write(filepath.Join(constants.TerraformIAMWorkingDir, f), content)
        require.NoError(t, err)
    }
    return fs, file
}

type tfClientStub struct {
    upgradeID string
    file      file.Handler
}

func (t *tfClientStub) Plan(_ context.Context, _ terraform.LogLevel, _ string) (bool, error) {
    return false, nil
}

func (t *tfClientStub) ShowPlan(_ context.Context, _ terraform.LogLevel, _ string, _ io.Writer) error {
    return nil
}

func (t *tfClientStub) ApplyIAMConfig(_ context.Context, _ cloudprovider.Provider, _ terraform.LogLevel) (terraform.IAMOutput, error) {
    upgradeDir := filepath.Join(constants.UpgradeDir, t.upgradeID, constants.TerraformIAMUpgradeWorkingDir)
    err := t.file.Remove(filepath.Join(upgradeDir, "terraform.tfvars"))
    if err != nil {
        return terraform.IAMOutput{}, err
    }
    err = t.file.Write(filepath.Join(upgradeDir, "terraform.tfvars"), []byte("NEW"))
    if err != nil {
        return terraform.IAMOutput{}, err
    }
    err = t.file.Remove(filepath.Join(upgradeDir, "terraform.tfstate"))
    if err != nil {
        return terraform.IAMOutput{}, err
    }
    err = t.file.Write(filepath.Join(upgradeDir, "terraform.tfstate"), []byte("NEW"))
    if err != nil {
        return terraform.IAMOutput{}, err
    }
    return terraform.IAMOutput{}, nil
}
@ -23,7 +23,7 @@ import (
)

// NewTerraformUpgrader returns a new TerraformUpgrader.
func NewTerraformUpgrader(tfClient tfClient, outWriter io.Writer, fileHandler file.Handler) (*TerraformUpgrader, error) {
func NewTerraformUpgrader(tfClient tfResourceClient, outWriter io.Writer, fileHandler file.Handler) (*TerraformUpgrader, error) {
    return &TerraformUpgrader{
        tf:            tfClient,
        policyPatcher: cloudcmd.NewAzurePolicyPatcher(),
@ -34,7 +34,7 @@ func NewTerraformUpgrader(tfClient tfClient, outWriter io.Writer, fileHandler fi

// TerraformUpgrader is responsible for performing Terraform migrations on cluster upgrades.
type TerraformUpgrader struct {
    tf tfClient
    tf tfResourceClient
    policyPatcher policyPatcher
    outWriter     io.Writer
    fileHandler   file.Handler
@ -51,15 +51,14 @@ type TerraformUpgradeOptions struct {
}

// CheckTerraformMigrations checks whether Terraform migrations are possible in the current workspace.
// If the files that will be written during the upgrade already exist, it returns an error.
func (u *TerraformUpgrader) CheckTerraformMigrations(upgradeID string) error {
func checkTerraformMigrations(file file.Handler, upgradeID, upgradeSubDir string) error {
    var existingFiles []string
    filesToCheck := []string{
        filepath.Join(constants.UpgradeDir, upgradeID, constants.TerraformUpgradeBackupDir),
        filepath.Join(constants.UpgradeDir, upgradeID, upgradeSubDir),
    }

    for _, f := range filesToCheck {
        if err := checkFileExists(u.fileHandler, &existingFiles, f); err != nil {
        if err := checkFileExists(file, &existingFiles, f); err != nil {
            return fmt.Errorf("checking terraform migrations: %w", err)
        }
    }
@ -70,6 +69,12 @@ func (u *TerraformUpgrader) CheckTerraformMigrations(upgradeID string) error {
    return nil
}

// CheckTerraformMigrations checks whether Terraform migrations are possible in the current workspace.
// If the files that will be written during the upgrade already exist, it returns an error.
func (u *TerraformUpgrader) CheckTerraformMigrations(upgradeID, upgradeSubDir string) error {
    return checkTerraformMigrations(u.fileHandler, upgradeID, upgradeSubDir)
}

// checkFileExists checks whether a file exists and adds it to the existingFiles slice if it does.
func checkFileExists(fileHandler file.Handler, existingFiles *[]string, filename string) error {
    _, err := fileHandler.Stat(filename)
@ -125,12 +130,17 @@ func (u *TerraformUpgrader) PlanTerraformMigrations(ctx context.Context, opts Te
// CleanUpTerraformMigrations cleans up the Terraform migration workspace, for example when an upgrade is
// aborted by the user.
func (u *TerraformUpgrader) CleanUpTerraformMigrations(upgradeID string) error {
    return CleanUpTerraformMigrations(upgradeID, u.fileHandler)
}

// CleanUpTerraformMigrations cleans up the Terraform upgrade directory.
func CleanUpTerraformMigrations(upgradeID string, fileHandler file.Handler) error {
    cleanupFiles := []string{
        filepath.Join(constants.UpgradeDir, upgradeID),
    }

    for _, f := range cleanupFiles {
        if err := u.fileHandler.RemoveAll(f); err != nil {
        if err := fileHandler.RemoveAll(f); err != nil {
            return fmt.Errorf("cleaning up file %s: %w", f, err)
        }
    }
@ -138,7 +148,7 @@ func (u *TerraformUpgrader) CleanUpTerraformMigrations(upgradeID string) error {
    return nil
}

// ApplyTerraformMigrations applies the migerations planned by PlanTerraformMigrations.
// ApplyTerraformMigrations applies the migrations planned by PlanTerraformMigrations.
// If PlanTerraformMigrations has not been executed before, it will return an error.
// In case of a successful upgrade, the output will be written to the specified file and the old Terraform directory is replaced
// by the new one.
@ -197,12 +207,22 @@ func (u *TerraformUpgrader) mergeClusterIDFile(migrationOutput clusterid.File) e
    return nil
}

// a tfClient performs the Terraform interactions in an upgrade.
type tfClient interface {
    PrepareUpgradeWorkspace(path, oldWorkingDir, newWorkingDir, upgradeID string, vars terraform.Variables) error
// tfClientCommon contains the Terraform interactions shared by all upgrade clients.
type tfClientCommon interface {
    ShowPlan(ctx context.Context, logLevel terraform.LogLevel, planFilePath string, output io.Writer) error
    Plan(ctx context.Context, logLevel terraform.LogLevel, planFile string) (bool, error)
}

// tfResourceClient is a Terraform client for managing cluster resources.
type tfResourceClient interface {
    PrepareUpgradeWorkspace(path, oldWorkingDir, newWorkingDir, backupDir string, vars terraform.Variables) error
    CreateCluster(ctx context.Context, logLevel terraform.LogLevel) (terraform.ApplyOutput, error)
    tfClientCommon
}

// tfIAMClient is a Terraform client for managing IAM resources.
type tfIAMClient interface {
    ApplyIAMConfig(ctx context.Context, csp cloudprovider.Provider, logLevel terraform.LogLevel) (terraform.IAMOutput, error)
    tfClientCommon
}

// policyPatcher interacts with the CSP (currently only applies for Azure) to update the attestation policy.
@ -58,7 +58,7 @@ func TestCheckTerraformMigrations(t *testing.T) {
    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            u := upgrader(tc.workspace)
            err := u.CheckTerraformMigrations(tc.upgradeID)
            err := u.CheckTerraformMigrations(tc.upgradeID, constants.TerraformUpgradeBackupDir)
            if tc.wantErr {
                require.Error(t, err)
                return
@ -70,7 +70,7 @@ func TestCheckTerraformMigrations(t *testing.T) {
}

func TestPlanTerraformMigrations(t *testing.T) {
    upgrader := func(tf tfClient, fileHandler file.Handler) *TerraformUpgrader {
    upgrader := func(tf tfResourceClient, fileHandler file.Handler) *TerraformUpgrader {
        u, err := NewTerraformUpgrader(tf, bytes.NewBuffer(nil), fileHandler)
        require.NoError(t, err)

@ -87,7 +87,7 @@ func TestPlanTerraformMigrations(t *testing.T) {

    testCases := map[string]struct {
        upgradeID string
        tf        tfClient
        tf        tfResourceClient
        workspace file.Handler
        want      bool
        wantErr   bool
@ -174,7 +174,7 @@ func TestPlanTerraformMigrations(t *testing.T) {
}

func TestApplyTerraformMigrations(t *testing.T) {
    upgrader := func(tf tfClient, fileHandler file.Handler) *TerraformUpgrader {
    upgrader := func(tf tfResourceClient, fileHandler file.Handler) *TerraformUpgrader {
        u, err := NewTerraformUpgrader(tf, bytes.NewBuffer(nil), fileHandler)
        require.NoError(t, err)

@ -197,7 +197,7 @@ func TestApplyTerraformMigrations(t *testing.T) {

    testCases := map[string]struct {
        upgradeID          string
        tf                 tfClient
        tf                 tfResourceClient
        policyPatcher      stubPolicyPatcher
        fs                 file.Handler
        skipIDFileCreation bool // if true, do not create the constellation-id.json file
6
dev-docs/workflows/e2e-github-config.md
Normal file
@ -0,0 +1,6 @@
## [E2E upgrade test](https://github.com/edgelesssys/constellation/actions/workflows/e2e-upgrade.yml)
Make sure to set the correct parameters to avoid late failures:
- It's easiest to use the latest CLI version, because then you can omit all other fields. This works because the devbuild is tagged with the next release version and is therefore compatible.
- If using an older CLI version (see the sketch below):
  - the last field, which simulates a patch upgrade, must use a minor version that is one below that of the next release
  - the image version must match the version given in the patch field
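As a purely hypothetical sketch of how the fields relate (the actual input names are defined by the workflow file; the versions assume the next release is v2.10.0):

```yaml
# Hypothetical dispatch inputs for the e2e-upgrade workflow.
# All field names below are made up for illustration; check the workflow file for the real ones.
cliVersion: "v2.9.0"            # older CLI, so the remaining fields must be set
imageVersion: "v2.9.2"          # must match the simulated patch version below
simulatedPatchVersion: "v2.9.2" # minor version one below the assumed next release v2.10.0
```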
@ -276,92 +276,8 @@ To [create the IAM configuration](../workflows/config.md#creating-an-iam-configu

The built-in `AdministratorAccess` policy is a superset of these permissions.

To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions:
To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the permissions listed in [main.tf](https://github.com/edgelesssys/constellation/blob/main/cli/internal/terraform/terraform/iam/aws/main.tf).

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "autoscaling:CreateAutoScalingGroup",
        "autoscaling:DeleteAutoScalingGroup",
        "autoscaling:DescribeAutoScalingGroups",
        "autoscaling:DescribeScalingActivities",
        "autoscaling:SetInstanceProtection",
        "autoscaling:UpdateAutoScalingGroup",
        "ec2:AllocateAddress",
        "ec2:AssociateRouteTable",
        "ec2:AttachInternetGateway",
        "ec2:AuthorizeSecurityGroupEgress",
        "ec2:AuthorizeSecurityGroupIngress",
        "ec2:CreateInternetGateway",
        "ec2:CreateLaunchTemplate",
        "ec2:CreateNatGateway",
        "ec2:CreateRoute",
        "ec2:CreateRouteTable",
        "ec2:CreateSecurityGroup",
        "ec2:CreateSubnet",
        "ec2:CreateTags",
        "ec2:CreateVpc",
        "ec2:DeleteInternetGateway",
        "ec2:DeleteLaunchTemplate",
        "ec2:DeleteNatGateway",
        "ec2:DeleteRouteTable",
        "ec2:DeleteSecurityGroup",
        "ec2:DeleteSubnet",
        "ec2:DeleteVpc",
        "ec2:DescribeAccountAttributes",
        "ec2:DescribeAddresses",
        "ec2:DescribeAvailabilityZones",
        "ec2:DescribeInternetGateways",
        "ec2:DescribeLaunchTemplates",
        "ec2:DescribeLaunchTemplateVersions",
        "ec2:DescribeNatGateways",
        "ec2:DescribeNetworkAcls",
        "ec2:DescribeNetworkInterfaces",
        "ec2:DescribeRouteTables",
        "ec2:DescribeSecurityGroups",
        "ec2:DescribeSubnets",
        "ec2:DescribeVpcAttribute",
        "ec2:DescribeVpcClassicLink",
        "ec2:DescribeVpcClassicLinkDnsSupport",
        "ec2:DescribeVpcs",
        "ec2:DetachInternetGateway",
        "ec2:DisassociateAddress",
        "ec2:DisassociateRouteTable",
        "ec2:ReleaseAddress",
        "ec2:RevokeSecurityGroupEgress",
        "elasticloadbalancing:AddTags",
        "elasticloadbalancing:CreateListener",
        "elasticloadbalancing:CreateLoadBalancer",
        "elasticloadbalancing:CreateTargetGroup",
        "elasticloadbalancing:DeleteListener",
        "elasticloadbalancing:DeleteLoadBalancer",
        "elasticloadbalancing:DeleteTargetGroup",
        "elasticloadbalancing:DescribeListeners",
        "elasticloadbalancing:DescribeLoadBalancerAttributes",
        "elasticloadbalancing:DescribeLoadBalancers",
        "elasticloadbalancing:DescribeTags",
        "elasticloadbalancing:DescribeTargetGroupAttributes",
        "elasticloadbalancing:DescribeTargetGroups",
        "elasticloadbalancing:DescribeTargetHealth",
        "elasticloadbalancing:ModifyLoadBalancerAttributes",
        "elasticloadbalancing:ModifyTargetGroupAttributes",
        "iam:PassRole",
        "logs:CreateLogGroup",
        "logs:DeleteLogGroup",
        "logs:DescribeLogGroups",
        "logs:ListTagsLogGroup",
        "logs:PutRetentionPolicy",
        "sts:GetCallerIdentity"
      ],
      "Resource": "*"
    }
  ]
}
```

The built-in `PowerUserAccess` policy is a superset of these permissions.
@ -1 +1,16 @@
# Expose services
# Expose a service
Constellation integrates the native load balancers of each CSP. Therefore, to expose a service, simply [create a service of type `LoadBalancer`](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer).

## Internet-facing LB service on AWS

To expose your application service externally, you might want to use a Kubernetes Service of type `LoadBalancer`. On AWS, load balancing is provided by the [AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller), as in managed EKS.

In recent versions, the controller deploys an internal LB by default. To get an internet-facing LB, set the annotation `service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing` on the service, as shown in the sketch below. For more details, see the [official docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/service/nlb/).

For general information on load balancing with AWS, see [Network load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html).
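A minimal sketch of such a service, assuming an application `my-app` listening on port 8080 (the name, selector, and ports are illustrative, not taken from this repository):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-app  # illustrative name
  annotations:
    # without this annotation, recent controller versions provision an internal LB
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
spec:
  type: LoadBalancer  # provisioned by the AWS Load Balancer Controller
  selector:
    app: my-app       # illustrative selector
  ports:
    - port: 80        # port exposed by the load balancer
      targetPort: 8080  # illustrative container port
```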
:::caution
Before terminating the cluster, delete all LB-backed services so that the controller can clean up the related resources.
:::
@ -143,6 +143,11 @@ const sidebars = {
        label: 'Upgrade your cluster',
        id: 'workflows/upgrade',
      },
      {
        type: 'doc',
        label: 'Expose a service',
        id: 'workflows/lb',
      },
      {
        type: 'doc',
        label: 'Install cert-manager',
@ -80,7 +80,7 @@ func TestUpgrade(t *testing.T) {

    // Migrate config if necessary.
    log.Println("Migrating config if needed.")
    cmd := exec.CommandContext(context.Background(), cli, "config", "migrate", "--config", constants.ConfigFilename, "--force", "--debug")
    cmd := exec.CommandContext(context.Background(), cli, "config", "migrate", "--config", constants.ConfigFilename, "--debug")
    stdout, stderr, err := runCommandWithSeparateOutputs(cmd)
    require.NoError(err, "Stdout: %s\nStderr: %s", string(stdout), string(stderr))
    log.Println(string(stdout))
@ -314,7 +314,7 @@ func writeUpgradeConfig(require *require.Assertions, image string, kubernetes st
// runUpgradeCheck executes 'upgrade check' and does basic checks on the output.
// We cannot check image upgrades because we might use unpublished images. The CLI uses a public CDN to check for available images.
func runUpgradeCheck(require *require.Assertions, cli, targetKubernetes string) {
    cmd := exec.CommandContext(context.Background(), cli, "upgrade", "check")
    cmd := exec.CommandContext(context.Background(), cli, "upgrade", "check", "--debug")
    stdout, stderr, err := runCommandWithSeparateOutputs(cmd)
    require.NoError(err, "Stdout: %s\nStderr: %s", string(stdout), string(stderr))

@ -356,7 +356,7 @@ func runUpgradeApply(require *require.Assertions, cli string) {
        tfLogFlag = "--tf-log=DEBUG"
    }

    cmd = exec.CommandContext(context.Background(), cli, "upgrade", "apply", "--force", "--debug", "--yes", tfLogFlag)
    cmd = exec.CommandContext(context.Background(), cli, "upgrade", "apply", "--debug", "--yes", tfLogFlag)
    stdout, stderr, err = runCommandWithSeparateOutputs(cmd)
    require.NoError(err, "Stdout: %s\nStderr: %s", string(stdout), string(stderr))
    require.NoError(containsUnexepectedMsg(string(stdout)))
@ -154,8 +154,12 @@ const (
    TerraformUpgradePlanFile = "plan.zip"
    // TerraformUpgradeWorkingDir is the directory name for the Terraform workspace being used in an upgrade.
    TerraformUpgradeWorkingDir = "terraform"
    // TerraformIAMUpgradeWorkingDir is the directory name for the Terraform IAM workspace being used in an upgrade.
    TerraformIAMUpgradeWorkingDir = "terraform-iam"
    // TerraformUpgradeBackupDir is the directory name used to back up the pre-upgrade state in an upgrade.
    TerraformUpgradeBackupDir = "terraform-backup"
    // TerraformIAMUpgradeBackupDir is the directory name used to back up the pre-upgrade IAM state in an upgrade.
    TerraformIAMUpgradeBackupDir = "terraform-iam-backup"
    // UpgradeDir is the name of the directory used for cluster upgrades.
    UpgradeDir = "constellation-upgrade"
    // ControlPlaneDefault is the name of the default control plane worker group.
@ -3,9 +3,23 @@ load("//bazel/go:go_test.bzl", "go_test")

go_library(
    name = "helm",
    srcs = ["helm.go"],
    srcs = [
        "helm.go",
        "install.go",
    ],
    importpath = "github.com/edgelesssys/constellation/v2/internal/deploy/helm",
    visibility = ["//:__subpackages__"],
    deps = [
        "//internal/constants",
        "//internal/logger",
        "//internal/retry",
        "@io_k8s_apimachinery//pkg/util/wait",
        "@org_uber_go_zap//:zap",
        "@sh_helm_helm_v3//pkg/action",
        "@sh_helm_helm_v3//pkg/chart",
        "@sh_helm_helm_v3//pkg/chart/loader",
        "@sh_helm_helm_v3//pkg/cli",
    ],
)

go_test(
@ -17,11 +17,12 @@ type Release struct {

// Releases bundles all helm releases to be deployed to Constellation.
type Releases struct {
    Cilium Release
    CertManager Release
    Operators Release
    ConstellationServices Release
    CSI *Release
    AWSLoadBalancerController *Release
    CSI                       *Release
    Cilium                    Release
    CertManager               Release
    ConstellationOperators    Release
    ConstellationServices     Release
}

// MergeMaps returns a new map that is the merger of its inputs.
159
internal/deploy/helm/install.go
Normal file
@ -0,0 +1,159 @@
/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

package helm

import (
    "bytes"
    "context"
    "fmt"
    "strings"
    "time"

    "github.com/edgelesssys/constellation/v2/internal/constants"
    "github.com/edgelesssys/constellation/v2/internal/logger"
    "github.com/edgelesssys/constellation/v2/internal/retry"
    "go.uber.org/zap"
    "helm.sh/helm/v3/pkg/action"
    "helm.sh/helm/v3/pkg/chart"
    "helm.sh/helm/v3/pkg/chart/loader"
    "helm.sh/helm/v3/pkg/cli"
    "k8s.io/apimachinery/pkg/util/wait"
)

const (
    // timeout is the maximum time given to the helm Installer.
    timeout = 10 * time.Minute
    // maximumRetryAttempts is the maximum number of attempts to retry a helm install.
    maximumRetryAttempts = 3
)

// Installer is a wrapper for a helm install action.
type Installer struct {
    *action.Install
    log *logger.Logger
}

// NewInstaller creates a new Installer with the given logger.
func NewInstaller(log *logger.Logger, kubeconfig string) (*Installer, error) {
    settings := cli.New()
    settings.KubeConfig = kubeconfig

    actionConfig := &action.Configuration{}
    if err := actionConfig.Init(settings.RESTClientGetter(), constants.HelmNamespace,
        "secret", log.Infof); err != nil {
        return nil, err
    }

    action := action.NewInstall(actionConfig)
    action.Namespace = constants.HelmNamespace
    action.Timeout = timeout

    return &Installer{
        action,
        log,
    }, nil
}

// InstallChart is the generic install function for helm charts.
// The timeout set at construction time (see NewInstaller) applies.
func (h *Installer) InstallChart(ctx context.Context, release Release) error {
    return h.InstallChartWithValues(ctx, release, nil)
}

// InstallChartWithValues is the generic install function for helm charts with custom values.
func (h *Installer) InstallChartWithValues(ctx context.Context, release Release, extraValues map[string]any) error {
    mergedVals := MergeMaps(release.Values, extraValues)
    h.ReleaseName = release.ReleaseName
    if err := h.SetWaitMode(release.WaitMode); err != nil {
        return err
    }
    return h.install(ctx, release.Chart, mergedVals)
}

// install tries to install the given chart and aborts after maximumRetryAttempts tries.
// The function waits 30 seconds before retrying a failed installation attempt.
// After 3 tries, the retrier is canceled and the function returns with an error.
func (h *Installer) install(ctx context.Context, chartRaw []byte, values map[string]any) error {
    var retries int
    retriable := func(err error) bool {
        // abort after maximumRetryAttempts tries.
        if retries >= maximumRetryAttempts {
            return false
        }
        retries++
        // only retry if atomic is set,
        // otherwise helm doesn't uninstall
        // the release on failure
        if !h.Atomic {
            return false
        }
        // check if the error is retriable
        return wait.Interrupted(err) ||
            strings.Contains(err.Error(), "connection refused")
    }

    reader := bytes.NewReader(chartRaw)
    chart, err := loader.LoadArchive(reader)
    if err != nil {
        return fmt.Errorf("helm load archive: %w", err)
    }

    doer := installDoer{
        h,
        chart,
        values,
        h.log,
    }
    retrier := retry.NewIntervalRetrier(doer, 30*time.Second, retriable)

    retryLoopStartTime := time.Now()
    if err := retrier.Do(ctx); err != nil {
        return fmt.Errorf("helm install: %w", err)
    }
    retryLoopFinishDuration := time.Since(retryLoopStartTime)
    h.log.With(zap.String("chart", chart.Name()), zap.Duration("duration", retryLoopFinishDuration)).Infof("Helm chart installation finished")

    return nil
}

// SetWaitMode sets the wait mode of the installer.
func (h *Installer) SetWaitMode(waitMode WaitMode) error {
    switch waitMode {
    case WaitModeNone:
        h.Wait = false
        h.Atomic = false
    case WaitModeWait:
        h.Wait = true
        h.Atomic = false
    case WaitModeAtomic:
        h.Wait = true
        h.Atomic = true
    default:
        return fmt.Errorf("unknown wait mode %q", waitMode)
    }
    return nil
}

// installDoer is a helper struct to enable retrying helm's install action.
type installDoer struct {
    Installer *Installer
    chart     *chart.Chart
    values    map[string]any
    log       *logger.Logger
}

// Do logs which chart is installed and tries to install it.
func (i installDoer) Do(ctx context.Context) error {
    i.log.With(zap.String("chart", i.chart.Name())).Infof("Trying to install Helm chart")

    if _, err := i.Installer.RunWithContext(ctx, i.chart, i.values); err != nil {
        i.log.With(zap.Error(err), zap.String("chart", i.chart.Name())).Errorf("Helm chart installation failed")
        return err
    }

    return nil
}