Mirror of https://github.com/edgelesssys/constellation.git (synced 2024-10-01 01:36:09 -04:00)

Commit c91d5a7b74 (parent a172b6f881): working state
@@ -72,8 +72,11 @@ func (h *Client) InstallAWSLoadBalancerController(ctx context.Context, release h
    if err := h.setWaitMode(release.WaitMode); err != nil {
        return err
    }

    return h.install(ctx, release.Chart, release.Values)
    err := h.install(ctx, release.Chart, release.Values)
    if err != nil {
        return err
    }
    return nil
}

// InstallConstellationServices installs the constellation-services chart. In the future this chart should bundle all microservices.

@@ -34,5 +34,5 @@ type helmClient interface {
    InstallCertManager(ctx context.Context, release helm.Release) error
    InstallOperators(ctx context.Context, release helm.Release, extraVals map[string]any) error
    InstallConstellationServices(ctx context.Context, release helm.Release, extraVals map[string]any) error
    InstallAWSLoadBalancerController(ctx context.Context, release helm.Release) error
    InstallAWSLoadBalancerController(context.Context, helm.Release) error
}

@@ -242,8 +242,8 @@ func (k *KubeWrapper) InitCluster(
        return nil, fmt.Errorf("installing cert-manager: %w", err)
    }

    log.Infof("Installing AWS Load Balancer Controller")
    if helmReleases.AWSLoadBalancerController.ReleaseName != "" {
        log.Infof("Installing AWS Load Balancer Controller")
        if err = k.helmClient.InstallAWSLoadBalancerController(ctx, helmReleases.AWSLoadBalancerController); err != nil {
            return nil, fmt.Errorf("installing AWS Load Balancer Controller: %w", err)
        }

@@ -7,6 +7,7 @@ go_library(
        "backup.go",
        "client.go",
        "helm.go",
        "installer.go",
        "loader.go",
        "values.go",
    ],

@@ -408,14 +409,20 @@ go_library(
        "//internal/constants",
        "//internal/deploy/helm",
        "//internal/file",
        "//internal/kubernetes/kubectl",
        "//internal/logger",
        "//internal/retry",
        "//internal/semver",
        "//internal/versions",
        "@com_github_pkg_errors//:errors",
        "@com_github_spf13_afero//:afero",
        "@io_k8s_api//core/v1:core",
        "@io_k8s_apiextensions_apiserver//pkg/apis/apiextensions/v1:apiextensions",
        "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured",
        "@io_k8s_apimachinery//pkg/runtime/schema",
        "@io_k8s_apimachinery//pkg/util/wait",
        "@io_k8s_sigs_yaml//:yaml",
        "@org_uber_go_zap//:zap",
        "@sh_helm_helm//pkg/ignore",
        "@sh_helm_helm_v3//pkg/action",
        "@sh_helm_helm_v3//pkg/chart",

@@ -61,11 +61,16 @@ resources: {}
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: system-cluster-critical

# TODO: changed
nodeSelector: {
  node-role.kubernetes.io/control-plane: ""
}

tolerations: []
# TODO: changed
tolerations:
  - key: node-role.kubernetes.io/control-plane
    operator: Exists
    effect: NoSchedule

# affinity specifies a custom affinity for the controller pods
affinity: {}

cli/internal/helm/installer.go (new file, 311 lines)

@@ -0,0 +1,311 @@
package helm

import (
    "bytes"
    "context"
    "fmt"
    "os"
    "strings"
    "time"

    "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
    "github.com/edgelesssys/constellation/v2/internal/constants"
    "github.com/edgelesssys/constellation/v2/internal/deploy/helm"
    "github.com/edgelesssys/constellation/v2/internal/kubernetes/kubectl"
    "github.com/edgelesssys/constellation/v2/internal/logger"
    "github.com/edgelesssys/constellation/v2/internal/retry"
    "go.uber.org/zap"
    "helm.sh/helm/v3/pkg/action"
    "helm.sh/helm/v3/pkg/chart"
    "helm.sh/helm/v3/pkg/chart/loader"
    "helm.sh/helm/v3/pkg/cli"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/wait"
)

func Install(kubeconfig string) {
    loader := NewLoader(cloudprovider.AWS, "v1.26.6", "constell-aws")
    builder := ChartBuilder{
        i: loader,
    }
    builder.AddChart(awsInfo)
    release, err := builder.Load(helm.WaitModeAtomic)
    if err != nil {
        panic(err)
    }
    installer, err := New(logger.New(logger.PlainLog, -1), kubeconfig)
    if err != nil {
        panic(err)
    }

    kubectl := kubectl.New()
    // Read the kubeconfig file; kubectl.Initialize builds its clients from the raw bytes.
    cfgB, err := os.ReadFile(kubeconfig)
    if err != nil {
        panic(fmt.Errorf("failed to read kubeconfig file: %w", err))
    }
    err = kubectl.Initialize(cfgB)
    if err != nil {
        panic(err)
    }
    err = installer.InstallAWSLoadBalancerController(context.Background(), kubectl, release.AWSLoadBalancerController)
    if err != nil {
        panic(err)
    }
}

/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/

const (
    // timeout is the maximum time given to the helm Installer.
    timeout = 5 * time.Minute
    // maximumRetryAttempts is the maximum number of attempts to retry a helm install.
    maximumRetryAttempts = 3
)

// Installer is used to install microservices during cluster initialization. It is a wrapper for a helm install action.
type Installer struct {
    *action.Install
    log *logger.Logger
}

// New creates a new Installer with the given logger.
func New(log *logger.Logger, kubeconfig string) (*Installer, error) {
    settings := cli.New()
    settings.KubeConfig = kubeconfig

    actionConfig := &action.Configuration{}
    if err := actionConfig.Init(settings.RESTClientGetter(), constants.HelmNamespace,
        "secret", log.Infof); err != nil {
        return nil, err
    }

    action := action.NewInstall(actionConfig)
    action.Namespace = constants.HelmNamespace
    action.Timeout = timeout

    return &Installer{
        action,
        log,
    }, nil
}

// k8sClient provides the functions to talk to the k8s API.
type k8sClient interface {
    Initialize(kubeconfig []byte) error
    CreateConfigMap(ctx context.Context, configMap corev1.ConfigMap) error
    AddTolerationsToDeployment(ctx context.Context, tolerations []corev1.Toleration, name string, namespace string) error
    AddNodeSelectorsToDeployment(ctx context.Context, selectors map[string]string, name string, namespace string) error
    ListAllNamespaces(ctx context.Context) (*corev1.NamespaceList, error)
    AnnotateNode(ctx context.Context, nodeName, annotationKey, annotationValue string) error
    EnforceCoreDNSSpread(ctx context.Context) error
}

// InstallAWSLoadBalancerController installs the AWS Load Balancer Controller.
// It fails when --skip-helm-wait is set, because the controller needs cert-manager to be ready.
func (h *Installer) InstallAWSLoadBalancerController(ctx context.Context, kubectl k8sClient, release helm.Release) error {
    h.ReleaseName = release.ReleaseName
    if err := h.setWaitMode(release.WaitMode); err != nil {
        return err
    }
    err := h.install(ctx, release.Chart, release.Values)
    if err != nil {
        return err
    }
    return nil
}

//// InstallConstellationServices installs the constellation-services chart. In the future this chart should bundle all microservices.
//func (h *Installer) InstallConstellationServices(ctx context.Context, release helm.Release, extraVals map[string]any) error {
//    h.ReleaseName = release.ReleaseName
//    if err := h.setWaitMode(release.WaitMode); err != nil {
//        return err
//    }
//
//    mergedVals := helm.MergeMaps(release.Values, extraVals)
//
//    return h.install(ctx, release.Chart, mergedVals)
//}

//// InstallCertManager installs the cert-manager chart.
//func (h *Installer) InstallCertManager(ctx context.Context, release helm.Release) error {
//    h.ReleaseName = release.ReleaseName
//    h.Timeout = 10 * time.Minute
//    if err := h.setWaitMode(release.WaitMode); err != nil {
//        return err
//    }
//
//    return h.install(ctx, release.Chart, release.Values)
//}

//// InstallOperators installs the Constellation Operators.
//func (h *Installer) InstallOperators(ctx context.Context, release helm.Release, extraVals map[string]any) error {
//    h.ReleaseName = release.ReleaseName
//    if err := h.setWaitMode(release.WaitMode); err != nil {
//        return err
//    }
//
//    mergedVals := helm.MergeMaps(release.Values, extraVals)
//
//    return h.install(ctx, release.Chart, mergedVals)
//}

//// InstallCilium sets up the cilium pod network.
//func (h *Installer) InstallCilium(ctx context.Context, kubectl k8sapi.Installer, release helm.Release, in k8sapi.SetupPodNetworkInput) error {
//    h.ReleaseName = release.ReleaseName
//    if err := h.setWaitMode(release.WaitMode); err != nil {
//        return err
//    }
//
//    timeoutS := int64(10)
//    // allow coredns to run on uninitialized nodes (required by cloud-controller-manager)
//    tolerations := []corev1.Toleration{
//        {
//            Key:    "node.cloudprovider.kubernetes.io/uninitialized",
//            Value:  "true",
//            Effect: corev1.TaintEffectNoSchedule,
//        },
//        {
//            Key:               "node.kubernetes.io/unreachable",
//            Operator:          corev1.TolerationOpExists,
//            Effect:            corev1.TaintEffectNoExecute,
//            TolerationSeconds: &timeoutS,
//        },
//    }
//    if err := kubectl.AddTolerationsToDeployment(ctx, tolerations, "coredns", "kube-system"); err != nil {
//        return fmt.Errorf("failed to add tolerations to coredns deployment: %w", err)
//    }
//    if err := kubectl.EnforceCoreDNSSpread(ctx); err != nil {
//        return fmt.Errorf("failed to enforce CoreDNS spread: %w", err)
//    }
//
//    switch in.CloudProvider {
//    case "aws", "azure", "openstack", "qemu":
//        return h.installCiliumGeneric(ctx, release, in.LoadBalancerEndpoint)
//    case "gcp":
//        return h.installCiliumGCP(ctx, release, in.NodeName, in.FirstNodePodCIDR, in.SubnetworkPodCIDR, in.LoadBalancerEndpoint)
//    default:
//        return fmt.Errorf("unsupported cloud provider %q", in.CloudProvider)
//    }
//}

//// installCiliumGeneric installs cilium with the given load balancer endpoint.
//// This is used for cloud providers that do not require special server-side configuration.
//// Currently this is AWS, Azure, and QEMU.
//func (h *Installer) installCiliumGeneric(ctx context.Context, release helm.Release, kubeAPIEndpoint string) error {
//    host := kubeAPIEndpoint
//    release.Values["k8sServiceHost"] = host
//    release.Values["k8sServicePort"] = strconv.Itoa(constants.KubernetesPort)
//
//    return h.install(ctx, release.Chart, release.Values)
//}

// func (h *Installer) installCiliumGCP(ctx context.Context, release helm.Release, nodeName, nodePodCIDR, subnetworkPodCIDR, kubeAPIEndpoint string) error {
//     out, err := exec.CommandContext(ctx, constants.KubectlPath, "--kubeconfig", constants.ControlPlaneAdminConfFilename, "patch", "node", nodeName, "-p", "{\"spec\":{\"podCIDR\": \""+nodePodCIDR+"\"}}").CombinedOutput()
//     if err != nil {
//         err = errors.New(string(out))
//         return err
//     }
//
//     host, port, err := net.SplitHostPort(kubeAPIEndpoint)
//     if err != nil {
//         return err
//     }
//
//     // configure pod network CIDR
//     release.Values["ipv4NativeRoutingCIDR"] = subnetworkPodCIDR
//     release.Values["strictModeCIDR"] = subnetworkPodCIDR
//     release.Values["k8sServiceHost"] = host
//     if port != "" {
//         release.Values["k8sServicePort"] = port
//     }
//
//     return h.install(ctx, release.Chart, release.Values)
//}

// install tries to install the given chart and aborts after at most 3 retries.
// The function will wait 30 seconds before retrying a failed installation attempt.
// After 3 retries, the retrier will be canceled and the function returns with an error.
func (h *Installer) install(ctx context.Context, chartRaw []byte, values map[string]any) error {
    var retries int
    retriable := func(err error) bool {
        // abort after maximumRetryAttempts tries.
        if retries >= maximumRetryAttempts {
            return false
        }
        retries++
        // only retry if atomic is set,
        // otherwise helm doesn't uninstall
        // the release on failure
        if !h.Atomic {
            return false
        }
        // check if error is retriable
        return wait.Interrupted(err) ||
            strings.Contains(err.Error(), "connection refused")
    }

    reader := bytes.NewReader(chartRaw)
    chart, err := loader.LoadArchive(reader)
    if err != nil {
        return fmt.Errorf("helm load archive: %w", err)
    }

    doer := installDoer{
        h,
        chart,
        values,
        h.log,
    }
    retrier := retry.NewIntervalRetrier(doer, 30*time.Second, retriable)

    retryLoopStartTime := time.Now()
    if err := retrier.Do(ctx); err != nil {
        return fmt.Errorf("helm install: %w", err)
    }
    retryLoopFinishDuration := time.Since(retryLoopStartTime)
    h.log.With(zap.String("chart", chart.Name()), zap.Duration("duration", retryLoopFinishDuration)).Infof("Helm chart installation finished")

    return nil
}

func (h *Installer) setWaitMode(waitMode helm.WaitMode) error {
    switch waitMode {
    case helm.WaitModeNone:
        h.Wait = false
        h.Atomic = false
    case helm.WaitModeWait:
        h.Wait = true
        h.Atomic = false
    case helm.WaitModeAtomic:
        h.Wait = true
        h.Atomic = true
    default:
        return fmt.Errorf("unknown wait mode %q", waitMode)
    }
    return nil
}

// installDoer is a helper struct to enable retrying helm's install action.
type installDoer struct {
    Installer *Installer
    chart     *chart.Chart
    values    map[string]any
    log       *logger.Logger
}

// Do logs which chart is installed and tries to install it.
func (i installDoer) Do(ctx context.Context) error {
    i.log.With(zap.String("chart", i.chart.Name())).Infof("Trying to install Helm chart")

    if _, err := i.Installer.RunWithContext(ctx, i.chart, i.values); err != nil {
        i.log.With(zap.Error(err), zap.String("chart", i.chart.Name())).Errorf("Helm chart installation failed")
        return err
    }

    return nil
}

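Why the installDoer wrapper exists at all: retry.NewIntervalRetrier needs a value it can invoke repeatedly. The exact interface it expects is defined in internal/retry; the shape below is only an illustrative sketch of that contract, not a copy of the real definition.

// Illustrative sketch of the contract installDoer is written against
// (see internal/retry for the actual interface definition):
type Doer interface {
    Do(ctx context.Context) error
}
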
@@ -104,6 +104,29 @@ func NewLoader(csp cloudprovider.Provider, k8sVersion versions.ValidK8sVersion,
    }
}

// ChartBuilder collects charts to be loaded into helm releases.
type ChartBuilder struct {
    charts []chartInfo
    i      *ChartLoader
}

// AddChart adds a chart to the builder.
func (b *ChartBuilder) AddChart(info chartInfo) {
    b.charts = append(b.charts, info)
}

// Load loads the added charts; currently only the AWS Load Balancer Controller release is populated.
func (b *ChartBuilder) Load(helmWaitMode helm.WaitMode) (helm.Releases, error) {
    var releases helm.Releases
    for _, info := range b.charts {
        awsRelease, err := b.i.loadRelease(info, helmWaitMode)
        if err != nil {
            return helm.Releases{}, fmt.Errorf("loading aws-services: %w", err)
        }
        releases.AWSLoadBalancerController = awsRelease
    }
    return releases, nil
}

type HelmInstaller struct{}

// Load the embedded helm charts.
func (i *ChartLoader) Load(config *config.Config, conformanceMode bool, helmWaitMode helm.WaitMode, masterSecret, salt []byte) ([]byte, error) {
    ciliumRelease, err := i.loadRelease(ciliumInfo, helmWaitMode)

@@ -194,7 +217,6 @@ func (i *ChartLoader) loadAWSLoadBalancerControllerValues() (map[string]any, err
        return nil, err
    }
    values["clusterName"] = i.clusterName
    // TODO add custom settings like nodeSelector here or keep in values.yaml?
    return values, nil
}

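One possible resolution of the TODO above, sketched only as an illustration (this commit keeps the settings in values.yaml): set the scheduling constraints programmatically in the loader. The helper name is hypothetical; the keys and values mirror what TestLoadAWSLoadBalancerValues further down expects.

// setAWSLBControllerScheduling is a hypothetical helper, not part of this commit.
// It shows the programmatic alternative to the nodeSelector/tolerations entries in values.yaml.
func setAWSLBControllerScheduling(values map[string]any) {
    values["nodeSelector"] = map[string]any{
        "node-role.kubernetes.io/control-plane": "",
    }
    values["tolerations"] = []any{
        map[string]any{
            "key":      "node-role.kubernetes.io/control-plane",
            "operator": "Exists",
            "effect":   "NoSchedule",
        },
    }
}
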
@@ -51,6 +51,29 @@ func TestLoad(t *testing.T) {
    assert.NotNil(chart.Dependencies())
}

func TestLoadDeploy(t *testing.T) {
    chart, err := loadChartsDir(helmFS, constellationServicesInfo.path) // helmFS, "./charts/edgeless/constellation-services/charts/aws-load-balancer-controller") //
    require.NoError(t, err)
    for _, f := range chart.Raw {
        // fmt.Println("UNFILTERED", f.Name)
        if strings.Contains(f.Name, "charts/aws-load-balancer-controller/crds/kustomization.yaml") {
            t.Error("helmignore should have filtered it out", f.Name)
        }
    }
}

func TestLoadAWSLoadBalancerValues(t *testing.T) {
    sut := ChartLoader{
        clusterName: "testCluster",
    }
    val, err := sut.loadAWSLoadBalancerControllerValues()
    require.NoError(t, err)
    assert.Equal(t, "testCluster", val["clusterName"])
    // needs to run on control-plane
    assert.Contains(t, val["nodeSelector"].(map[string]any), "node-role.kubernetes.io/control-plane")
    assert.Contains(t, val["tolerations"].([]interface{}), map[string]any{"key": "node-role.kubernetes.io/control-plane", "operator": "Exists", "effect": "NoSchedule"})
}

// TestConstellationServices checks if the rendered constellation-services chart produces the expected yaml files.
func TestConstellationServices(t *testing.T) {
    testCases := map[string]struct {

@@ -145,8 +168,14 @@ func TestConstellationServices(t *testing.T) {
            gcpGuestAgentImage: "gcpGuestAgentImage",
            clusterName: "testCluster",
        }
        chart, err := loadChartsDir(helmFS, constellationServicesInfo.path)
        chart, err := loadChartsDir(helmFS, constellationServicesInfo.path) // helmFS, "./charts/edgeless/constellation-services/charts/aws-load-balancer-controller") //
        require.NoError(err)
        for _, f := range chart.Raw {
            // fmt.Println("UNFILTERED", f.Name)
            if strings.Contains(f.Name, "crds") {
                fmt.Println("FOUND", f.Name)
            }
        }
        values, err := chartLoader.loadConstellationServicesValues()
        require.NoError(err)
        err = extendConstellationServicesValues(values, tc.config, []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))

cli/internal/helm/main/BUILD.bazel (new file, 15 lines)

@@ -0,0 +1,15 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")

go_library(
    name = "main_lib",
    srcs = ["main.go"],
    importpath = "github.com/edgelesssys/constellation/v2/cli/internal/helm/main",
    visibility = ["//visibility:private"],
    deps = ["//cli/internal/helm"],
)

go_binary(
    name = "main",
    embed = [":main_lib"],
    visibility = ["//cli:__subpackages__"],
)

cli/internal/helm/main/main.go (new file, 11 lines)

@@ -0,0 +1,11 @@
package main

import (
    "os"

    "github.com/edgelesssys/constellation/v2/cli/internal/helm"
)

func main() {
    helm.Install(os.Getenv("KUBECONFIG")) // constants.ControlPlaneAdminConfFilename)
}

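Presumably this debug helper is meant to be run against an existing AWS cluster, e.g. via the go_binary target declared above (bazel run //cli/internal/helm/main) with KUBECONFIG pointing at the cluster's admin kubeconfig; it then loads and installs only the AWS Load Balancer Controller release through helm.Install.
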
@@ -63,6 +63,7 @@ resource "aws_iam_policy" "control_plane_policy" {
        "elasticloadbalancing:ModifyTargetGroupAttributes",
        "elasticloadbalancing:RegisterTargets",
        "elasticloadbalancing:DeregisterTargets",
        "elasticloadbalancing:DescribeTags",
        "autoscaling:DescribeAutoScalingGroups",
        "autoscaling:DescribeLaunchConfigurations",
        "autoscaling:DescribeTags",