aws: use new LB controller to fix SecurityGroup cleanup on K8s service deletion (#2090)

* add current chart

add current helm chart

* disable service controller for aws ccm

* add new iam roles

* doc AWS internet LB + add to LB test

* pass clusterName to helm for AWS LB

* fix update-aws-lb chart to also include .helmignore

* move chart outside services

* working state

* add subnet tags for AWS subnet discovery

* fix .helmignore load rule with file in subdirectory

* upgrade iam profile

* revert new loader impl since cilium is not correctly loaded

* install chart if not already present during `upgrade apply`

* cleanup PR + fix build + add todos

cleanup PR + add todos

* shared helm pkg for cli install and bootstrapper

* add link to eks docs

* refactor iamMigrationCmd

* delete unused helm.symwallk

* move iammigrate to upgrade pkg

* fixup! delete unused helm.symwallk

* add to upgradecheck

* remove nodeSelector from go code (Otto)

* update iam docs and sort permission + remove duplicate roles

* fix bug in `upgrade check`

* better upgrade check output when svc version upgrade not possible

* pr feedback

* remove force flag in upgrade_test

* use upgrader.GetUpgradeID instead of extra type

* remove todos + fix check

* update doc lb (leo)

* remove bootstrapper helm package

* Update cli/internal/cmd/upgradecheck.go

Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com>

* final nits

* add docs for e2e upgrade test setup

* Apply suggestions from code review

Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com>

* Update cli/internal/helm/loader.go

Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com>

* Update cli/internal/cmd/tfmigrationclient.go

Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com>

* fix daniel review

* link to the iam permissions instead of manually updating them (agreed with leo)

* disable iam upgrade in upgrade apply

---------

Co-authored-by: Daniel Weiße <66256922+daniel-weisse@users.noreply.github.com>
Co-authored-by: Malte Poll
Adrian Stobbe 2023-07-24 10:30:53 +02:00 committed by GitHub
parent 8da6a23aa5
commit a87b7894db
67 changed files with 3018 additions and 451 deletions


@@ -52,7 +52,7 @@ resource "aws_subnet" "private" {
   vpc_id            = var.vpc_id
   cidr_block        = cidrsubnet(var.cidr_vpc_subnet_nodes, 4, local.az_number[each.value.name_suffix])
   availability_zone = each.key
-  tags              = merge(var.tags, { Name = "${var.name}-subnet-nodes" })
+  tags              = merge(var.tags, { Name = "${var.name}-subnet-nodes" }, { "kubernetes.io/role/internal-elb" = 1 }) # aws-load-balancer-controller needs role annotation
   lifecycle {
     ignore_changes = [
       cidr_block, # required. Legacy subnets used fixed cidr blocks for the single zone that don't match the new scheme.
@@ -65,7 +65,7 @@ resource "aws_subnet" "public" {
   vpc_id            = var.vpc_id
   cidr_block        = cidrsubnet(var.cidr_vpc_subnet_internet, 4, local.az_number[each.value.name_suffix])
   availability_zone = each.key
-  tags              = merge(var.tags, { Name = "${var.name}-subnet-internet" })
+  tags              = merge(var.tags, { Name = "${var.name}-subnet-internet" }, { "kubernetes.io/role/elb" = 1 }) # aws-load-balancer-controller needs role annotation
   lifecycle {
     ignore_changes = [
       cidr_block, # required. Legacy subnets used fixed cidr blocks for the single zone that don't match the new scheme.
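
The two kubernetes.io/role/... tags are what the aws-load-balancer-controller's subnet auto-discovery keys on: for internal load balancers it selects subnets in the cluster's VPC tagged kubernetes.io/role/internal-elb, and for internet-facing ones it selects those tagged kubernetes.io/role/elb. As a rough sketch of the same lookup (illustrative only, not part of this change; var.vpc_id is assumed to exist in the surrounding module):

# Illustrative only: mirrors the controller's public-subnet discovery by tag.
data "aws_subnets" "elb_capable" {
  filter {
    name   = "vpc-id"
    values = [var.vpc_id] # assumed variable from the surrounding module
  }

  tags = {
    "kubernetes.io/role/elb" = "1"
  }
}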


@@ -51,6 +51,12 @@ resource "aws_iam_policy" "control_plane_policy" {
     {
       "Effect": "Allow",
       "Action": [
+        "elasticloadbalancing:DescribeTargetGroupAttributes",
+        "elasticloadbalancing:DescribeRules",
+        "shield:GetSubscriptionState",
+        "elasticloadbalancing:DescribeListeners",
+        "elasticloadbalancing:ModifyTargetGroupAttributes",
+        "elasticloadbalancing:DescribeTags",
         "autoscaling:DescribeAutoScalingGroups",
         "autoscaling:DescribeLaunchConfigurations",
         "autoscaling:DescribeTags",


@@ -10,8 +10,7 @@ terraform {
 locals {
   tags      = distinct(sort(concat(var.tags, ["constellation-role-${var.role}"], ["constellation-node-group-${var.node_group_name}"])))
   group_uid = random_id.uid.hex
-  #name = "${var.base_name}-${var.role}" // TODO keep old naming ?
   name      = "${var.base_name}-${var.role}-${local.group_uid}"
 }
 
 resource "random_id" "uid" {