Refactor Terraform to have all ports in a list (#2409)

* terraform: aws refactoring

* terraform: gcp refactoring

* terraform: azure refactoring
3u13r 2023-10-05 12:34:02 +02:00 committed by GitHub
parent f69ae26122
commit 1452e64675
5 changed files with 147 additions and 304 deletions
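The shape of the refactor is the same on all three clouds: the per-service port locals (ports_kubernetes, ports_bootstrapper, and so on) collapse into a single list of objects, and every consumer (security rules, load balancer targets, health probes) iterates over that list. A condensed sketch of the pattern, using names taken from the diffs below (the real blocks carry more attributes):

locals {
  # One list is the single source of truth for every per-service port.
  load_balancer_ports = flatten([
    { name = "kubernetes",   port = "6443", health_check = "HTTPS" },
    { name = "bootstrapper", port = "9000", health_check = "TCP" },
    # debugd exists only in debug mode: flatten() splices the inner list
    # in when var.debug is true and drops the empty list otherwise.
    var.debug ? [{ name = "debugd", port = "4000", health_check = "TCP" }] : [],
  ])
}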

File 1 of 5: AWS infrastructure configuration

@@ -17,27 +17,23 @@ provider "aws" {
}
locals {
uid = random_id.uid.hex
name = "${var.name}-${local.uid}"
initSecretHash = random_password.initSecret.bcrypt_hash
ports_node_range = "30000-32767"
ports_kubernetes = "6443"
ports_bootstrapper = "9000"
ports_konnectivity = "8132"
ports_verify = "30081"
ports_recovery = "9999"
ports_debugd = "4000"
ports_join = "30090"
uid = random_id.uid.hex
name = "${var.name}-${local.uid}"
initSecretHash = random_password.initSecret.bcrypt_hash
ports_node_range = "30000-32767"
load_balancer_ports = flatten([
{ name = "kubernetes", port = "6443", health_check = "HTTPS" },
{ name = "bootstrapper", port = "9000", health_check = "TCP" },
{ name = "verify", port = "30081", health_check = "TCP" },
{ name = "konnectivity", port = "8132", health_check = "TCP" },
{ name = "recovery", port = "9999", health_check = "TCP" },
{ name = "join", port = "30090", health_check = "TCP" },
var.debug ? [{ name = "debugd", port = "4000", health_check = "TCP" }] : [],
])
target_group_arns = {
control-plane : flatten([
module.load_balancer_target_bootstrapper.target_group_arn,
module.load_balancer_target_kubernetes.target_group_arn,
module.load_balancer_target_verify.target_group_arn,
module.load_balancer_target_recovery.target_group_arn,
module.load_balancer_target_konnectivity.target_group_arn,
module.load_balancer_target_join.target_group_arn,
var.debug ? [module.load_balancer_target_debugd[0].target_group_arn] : [],
])
control-plane : [
for port in local.load_balancer_ports : module.load_balancer_targets[port.name].target_group_arn
]
worker : []
}
iam_instance_profile = {
@@ -142,36 +138,15 @@ resource "aws_security_group" "security_group" {
description = "K8s node ports"
}
ingress {
from_port = local.ports_bootstrapper
to_port = local.ports_bootstrapper
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "bootstrapper"
}
ingress {
from_port = local.ports_kubernetes
to_port = local.ports_kubernetes
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "kubernetes"
}
ingress {
from_port = local.ports_konnectivity
to_port = local.ports_konnectivity
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "konnectivity"
}
ingress {
from_port = local.ports_recovery
to_port = local.ports_recovery
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "recovery"
dynamic "ingress" {
for_each = local.load_balancer_ports
content {
description = ingress.value.name
from_port = ingress.value.port
to_port = ingress.value.port
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
}
ingress {
@@ -182,16 +157,6 @@ resource "aws_security_group" "security_group" {
description = "allow all internal"
}
dynamic "ingress" {
for_each = var.debug ? [1] : []
content {
from_port = local.ports_debugd
to_port = local.ports_debugd
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "debugd"
}
}
}
resource "aws_cloudwatch_log_group" "log_group" {
@@ -200,76 +165,16 @@ resource "aws_cloudwatch_log_group" "log_group" {
tags = local.tags
}
module "load_balancer_target_bootstrapper" {
module "load_balancer_targets" {
for_each = { for port in local.load_balancer_ports : port.name => port }
source = "./modules/load_balancer_target"
name = "${local.name}-bootstrapper"
name = "${local.name}-${each.value.name}"
port = each.value.port
healthcheck_protocol = each.value.health_check
healthcheck_path = each.value.name == "kubernetes" ? "/readyz" : ""
vpc_id = aws_vpc.vpc.id
lb_arn = aws_lb.front_end.arn
port = local.ports_bootstrapper
tags = local.tags
healthcheck_protocol = "TCP"
}
module "load_balancer_target_kubernetes" {
source = "./modules/load_balancer_target"
name = "${local.name}-kubernetes"
vpc_id = aws_vpc.vpc.id
lb_arn = aws_lb.front_end.arn
port = local.ports_kubernetes
tags = local.tags
healthcheck_protocol = "HTTPS"
healthcheck_path = "/readyz"
}
module "load_balancer_target_verify" {
source = "./modules/load_balancer_target"
name = "${local.name}-verify"
vpc_id = aws_vpc.vpc.id
lb_arn = aws_lb.front_end.arn
port = local.ports_verify
tags = local.tags
healthcheck_protocol = "TCP"
}
module "load_balancer_target_recovery" {
source = "./modules/load_balancer_target"
name = "${local.name}-recovery"
vpc_id = aws_vpc.vpc.id
lb_arn = aws_lb.front_end.arn
port = local.ports_recovery
tags = local.tags
healthcheck_protocol = "TCP"
}
module "load_balancer_target_debugd" {
count = var.debug ? 1 : 0 // only deploy debugd in debug mode
source = "./modules/load_balancer_target"
name = "${local.name}-debugd"
vpc_id = aws_vpc.vpc.id
lb_arn = aws_lb.front_end.arn
port = local.ports_debugd
tags = local.tags
healthcheck_protocol = "TCP"
}
module "load_balancer_target_konnectivity" {
source = "./modules/load_balancer_target"
name = "${local.name}-konnectivity"
vpc_id = aws_vpc.vpc.id
lb_arn = aws_lb.front_end.arn
port = local.ports_konnectivity
tags = local.tags
healthcheck_protocol = "TCP"
}
module "load_balancer_target_join" {
source = "./modules/load_balancer_target"
name = "${local.name}-join"
vpc_id = aws_vpc.vpc.id
lb_arn = aws_lb.front_end.arn
port = local.ports_join
tags = local.tags
healthcheck_protocol = "TCP"
}
module "instance_group" {
@@ -300,3 +205,39 @@ module "instance_group" {
{ "kubernetes.io/cluster/${local.name}" = "owned" }
)
}
# TODO(3u13r): Remove once 2.12 is released
moved {
from = module.load_balancer_target_konnectivity
to = module.load_balancer_targets["konnectivity"]
}
moved {
from = module.load_balancer_target_verify
to = module.load_balancer_targets["verify"]
}
moved {
from = module.load_balancer_target_recovery
to = module.load_balancer_targets["recovery"]
}
moved {
from = module.load_balancer_target_join
to = module.load_balancer_targets["join"]
}
moved {
from = module.load_balancer_target_debugd[0]
to = module.load_balancer_targets["debugd"]
}
moved {
from = module.load_balancer_target_kubernetes
to = module.load_balancer_targets["kubernetes"]
}
moved {
from = module.load_balancer_target_bootstrapper
to = module.load_balancer_targets["bootstrapper"]
}
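
Because for_each needs a map, the comprehension above keys each port object by its name, and those keys become the instance addresses that the moved blocks target. A moved block only rewrites state addresses, so Terraform migrates each existing target group in place instead of destroying and recreating it; if the from address is absent from state (module.load_balancer_target_debugd[0] in a non-debug cluster, for instance), the block is a no-op. A minimal sketch of the mechanism, with hypothetical module names:

module "targets" {
  # list -> map: the keys "kubernetes", "bootstrapper", ... become instance addresses
  for_each = { for p in local.load_balancer_ports : p.name => p }
  source   = "./modules/load_balancer_target"
  name     = each.key
  port     = each.value.port
}

moved {
  from = module.target_kubernetes     # hypothetical old single-purpose module
  to   = module.targets["kubernetes"] # new keyed instance; no destroy/recreate
}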

File 2 of 5: Azure infrastructure configuration

@@ -27,15 +27,17 @@ locals {
constellation-uid = local.uid,
}
ports_node_range = "30000-32767"
ports_kubernetes = "6443"
ports_bootstrapper = "9000"
ports_konnectivity = "8132"
ports_verify = "30081"
ports_recovery = "9999"
ports_join = "30090"
ports_debugd = "4000"
cidr_vpc_subnet_nodes = "192.168.178.0/24"
cidr_vpc_subnet_pods = "10.10.0.0/16"
ports = flatten([
{ name = "kubernetes", port = "6443", health_check_protocol = "Https", path = "/readyz", priority = 100 },
{ name = "bootstrapper", port = "9000", health_check_protocol = "Tcp", path = null, priority = 101 },
{ name = "verify", port = "30081", health_check_protocol = "Tcp", path = null, priority = 102 },
{ name = "konnectivity", port = "8132", health_check_protocol = "Tcp", path = null, priority = 103 },
{ name = "recovery", port = "9999", health_check_protocol = "Tcp", path = null, priority = 104 },
{ name = "join", port = "30090", health_check_protocol = "Tcp", path = null, priority = 105 },
var.debug ? [{ name = "debugd", port = "4000", health_check_protocol = "Tcp", path = null, priority = 106 }] : [],
])
// wildcard_lb_dns_name is the DNS name of the load balancer with a wildcard for the name.
// example: given "name-1234567890.location.cloudapp.azure.com" it will return "*.location.cloudapp.azure.com"
wildcard_lb_dns_name = replace(data.azurerm_public_ip.loadbalancer_ip.fqdn, "/^[^.]*\\./", "*.")
@@ -150,60 +152,19 @@ resource "azurerm_lb" "loadbalancer" {
module "loadbalancer_backend_control_plane" {
source = "./modules/load_balancer_backend"
name = "${local.name}-control-plane"
loadbalancer_id = azurerm_lb.loadbalancer.id
ports = flatten([
{
name = "bootstrapper",
port = local.ports_bootstrapper,
protocol = "Tcp",
path = null
},
{
name = "kubernetes",
port = local.ports_kubernetes,
protocol = "Https",
path = "/readyz"
},
{
name = "konnectivity",
port = local.ports_konnectivity,
protocol = "Tcp",
path = null
},
{
name = "verify",
port = local.ports_verify,
protocol = "Tcp",
path = null
},
{
name = "recovery",
port = local.ports_recovery,
protocol = "Tcp",
path = null
},
{
name = "join",
port = local.ports_join,
protocol = "Tcp",
path = null
},
var.debug ? [{
name = "debugd",
port = local.ports_debugd,
protocol = "Tcp",
path = null
}] : [],
])
name = "${local.name}-control-plane"
loadbalancer_id = azurerm_lb.loadbalancer.id
frontend_ip_configuration_name = azurerm_lb.loadbalancer.frontend_ip_configuration[0].name
ports = local.ports
}
module "loadbalancer_backend_worker" {
source = "./modules/load_balancer_backend"
name = "${local.name}-worker"
loadbalancer_id = azurerm_lb.loadbalancer.id
ports = []
name = "${local.name}-worker"
loadbalancer_id = azurerm_lb.loadbalancer.id
frontend_ip_configuration_name = azurerm_lb.loadbalancer.frontend_ip_configuration[0].name
ports = []
}
resource "azurerm_lb_backend_address_pool" "all" {
@@ -233,15 +194,10 @@ resource "azurerm_network_security_group" "security_group" {
tags = local.tags
dynamic "security_rule" {
for_each = flatten([
{ name = "noderange", priority = 100, dest_port_range = local.ports_node_range },
{ name = "kubernetes", priority = 101, dest_port_range = local.ports_kubernetes },
{ name = "bootstrapper", priority = 102, dest_port_range = local.ports_bootstrapper },
{ name = "konnectivity", priority = 103, dest_port_range = local.ports_konnectivity },
{ name = "join", priority = 104, dest_port_range = local.ports_recovery },
{ name = "recovery", priority = 105, dest_port_range = local.ports_join },
var.debug ? [{ name = "debugd", priority = 106, dest_port_range = local.ports_debugd }] : [],
])
for_each = concat(
local.ports,
[{ name = "nodeports", port = local.ports_node_range, priority = 200 }]
)
content {
name = security_rule.value.name
priority = security_rule.value.priority
@@ -249,7 +205,7 @@ resource "azurerm_network_security_group" "security_group" {
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = security_rule.value.dest_port_range
destination_port_range = security_rule.value.port
source_address_prefix = "*"
destination_address_prefix = "*"
}
@@ -298,12 +254,3 @@ data "azurerm_user_assigned_identity" "uaid" {
resource_group_name = local.uai_resource_group
}
moved {
from = module.scale_set_control_plane
to = module.scale_set_group["control_plane_default"]
}
moved {
from = module.scale_set_worker
to = module.scale_set_group["worker_default"]
}
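
Unlike the AWS variant, each Azure port entry also carries a priority, since Azure NSG rules require a unique priority per rule (lower values are evaluated first); the node-port range sits at 200 to leave a gap after the per-service rules at 100 through 106. A hedged alternative sketch, deriving priorities from list position so that inserting a port cannot collide with a hand-numbered rule:

locals {
  # Hypothetical variant: index-based priorities instead of hard-coded ones.
  ports_with_priority = [
    for i, p in local.ports : merge(p, { priority = 100 + i })
  ]
}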

File 3 of 5: Azure load_balancer_backend module

@@ -18,7 +18,7 @@ resource "azurerm_lb_probe" "health_probes" {
loadbalancer_id = var.loadbalancer_id
name = each.value.name
port = each.value.port
protocol = each.value.protocol
protocol = each.value.health_check_protocol
request_path = each.value.path
interval_in_seconds = 5
}
@@ -31,7 +31,7 @@ resource "azurerm_lb_rule" "rules" {
protocol = "Tcp"
frontend_port = each.value.port
backend_port = each.value.port
frontend_ip_configuration_name = "PublicIPAddress"
frontend_ip_configuration_name = var.frontend_ip_configuration_name
backend_address_pool_ids = [azurerm_lb_backend_address_pool.backend_pool.id]
probe_id = each.value.id
disable_outbound_snat = true
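
Note the chaining: the rules resource iterates over the probe resource's instances rather than over var.ports directly, so each.value is an azurerm_lb_probe object. That is why probe_id = each.value.id resolves to the matching probe's ID and frontend_port = each.value.port carries the port through. A minimal sketch of the idiom (required arguments omitted for brevity):

resource "azurerm_lb_probe" "health_probes" {
  for_each = { for port in var.ports : port.name => port }
  name     = each.value.name
  port     = each.value.port
}

resource "azurerm_lb_rule" "rules" {
  for_each      = azurerm_lb_probe.health_probes # iterate the probe instances
  name          = each.value.name
  frontend_port = each.value.port # attribute of the probe, not of var.ports
  backend_port  = each.value.port
  probe_id      = each.value.id   # each probe exposes its own ID
}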

File 4 of 5: Azure load_balancer_backend module variables

@@ -4,6 +4,11 @@ variable "name" {
description = "Base name of the cluster."
}
variable "frontend_ip_configuration_name" {
type = string
description = "The name of the frontend IP configuration to use for the load balancer."
}
variable "loadbalancer_id" {
type = string
description = "The ID of the load balancer to add the backend to."
@@ -11,10 +16,10 @@ variable "loadbalancer_id" {
variable "ports" {
type = list(object({
name = string
port = number
protocol = string
path = string
name = string
port = number
health_check_protocol = string
path = string
}))
description = "The ports to add to the backend. Protocol can be either 'Tcp' or 'Https'. Path is only used for 'Https' protocol and can otherwise be null."
}
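
Given the renamed health_check_protocol field and the new required frontend_ip_configuration_name, a call to the module now looks roughly like this (illustrative values; note the main configuration's locals define ports as strings such as "6443", which Terraform converts to satisfy the declared number type):

module "loadbalancer_backend_control_plane" {
  source                         = "./modules/load_balancer_backend"
  name                           = "${local.name}-control-plane"
  loadbalancer_id                = azurerm_lb.loadbalancer.id
  frontend_ip_configuration_name = azurerm_lb.loadbalancer.frontend_ip_configuration[0].name
  ports = [
    { name = "kubernetes", port = 6443, health_check_protocol = "Https", path = "/readyz" },
    { name = "recovery",   port = 9999, health_check_protocol = "Tcp",   path = null },
  ]
}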

File 5 of 5: GCP infrastructure configuration

@@ -37,24 +37,19 @@ locals {
constellation-uid = local.uid,
}
ports_node_range = "30000-32767"
ports_kubernetes = "6443"
ports_bootstrapper = "9000"
ports_konnectivity = "8132"
ports_verify = "30081"
ports_recovery = "9999"
ports_join = "30090"
ports_debugd = "4000"
cidr_vpc_subnet_nodes = "192.168.178.0/24"
cidr_vpc_subnet_pods = "10.10.0.0/16"
cidr_vpc_subnet_proxy = "192.168.179.0/24"
cidr_vpc_subnet_ilb = "192.168.180.0/24"
kube_env = "AUTOSCALER_ENV_VARS: kube_reserved=cpu=1060m,memory=1019Mi,ephemeral-storage=41Gi;node_labels=;os=linux;os_distribution=cos;evictionHard="
control_plane_named_ports = flatten([
{ name = "kubernetes", port = local.ports_kubernetes },
{ name = "bootstrapper", port = local.ports_bootstrapper },
{ name = "verify", port = local.ports_verify },
{ name = "konnectivity", port = local.ports_konnectivity },
{ name = "recovery", port = local.ports_recovery },
{ name = "join", port = local.ports_join },
var.debug ? [{ name = "debugd", port = local.ports_debugd }] : [],
{ name = "kubernetes", port = "6443", health_check = "HTTPS" },
{ name = "bootstrapper", port = "9000", health_check = "TCP" },
{ name = "verify", port = "30081", health_check = "TCP" },
{ name = "konnectivity", port = "8132", health_check = "TCP" },
{ name = "recovery", port = "9999", health_check = "TCP" },
{ name = "join", port = "30090", health_check = "TCP" },
var.debug ? [{ name = "debugd", port = "4000", health_check = "TCP" }] : [],
])
node_groups_by_role = {
for name, node_group in var.node_groups : node_group.role => name...
@@ -117,13 +112,8 @@ resource "google_compute_firewall" "firewall_external" {
allow {
protocol = "tcp"
ports = flatten([
local.ports_node_range,
local.ports_bootstrapper,
local.ports_kubernetes,
local.ports_konnectivity,
local.ports_recovery,
local.ports_join,
var.debug ? [local.ports_debugd] : [],
[for port in local.control_plane_named_ports : port.port],
[local.ports_node_range],
])
}
@@ -182,90 +172,50 @@ resource "google_compute_global_address" "loadbalancer_ip" {
name = local.name
}
module "loadbalancer_kube" {
module "loadbalancer_public" {
// for every port in control_plane_named_ports if internal lb is disabled
for_each = { for port in local.control_plane_named_ports : port.name => port }
source = "./modules/loadbalancer"
name = local.name
health_check = "HTTPS"
backend_port_name = "kubernetes"
backend_port_name = each.value.name
port = each.value.port
health_check = each.value.health_check
backend_instance_groups = local.control_plane_instance_groups
ip_address = google_compute_global_address.loadbalancer_ip.self_link
port = local.ports_kubernetes
frontend_labels = merge(local.labels, { constellation-use = "kubernetes" })
}
module "loadbalancer_boot" {
source = "./modules/loadbalancer"
name = local.name
health_check = "TCP"
backend_port_name = "bootstrapper"
backend_instance_groups = local.control_plane_instance_groups
ip_address = google_compute_global_address.loadbalancer_ip.self_link
port = local.ports_bootstrapper
frontend_labels = merge(local.labels, { constellation-use = "bootstrapper" })
}
module "loadbalancer_verify" {
source = "./modules/loadbalancer"
name = local.name
health_check = "TCP"
backend_port_name = "verify"
backend_instance_groups = local.control_plane_instance_groups
ip_address = google_compute_global_address.loadbalancer_ip.self_link
port = local.ports_verify
frontend_labels = merge(local.labels, { constellation-use = "verify" })
}
module "loadbalancer_konnectivity" {
source = "./modules/loadbalancer"
name = local.name
health_check = "TCP"
backend_port_name = "konnectivity"
backend_instance_groups = local.control_plane_instance_groups
ip_address = google_compute_global_address.loadbalancer_ip.self_link
port = local.ports_konnectivity
frontend_labels = merge(local.labels, { constellation-use = "konnectivity" })
}
module "loadbalancer_recovery" {
source = "./modules/loadbalancer"
name = local.name
health_check = "TCP"
backend_port_name = "recovery"
backend_instance_groups = local.control_plane_instance_groups
ip_address = google_compute_global_address.loadbalancer_ip.self_link
port = local.ports_recovery
frontend_labels = merge(local.labels, { constellation-use = "recovery" })
}
module "loadbalancer_join" {
source = "./modules/loadbalancer"
name = local.name
health_check = "TCP"
backend_port_name = "join"
backend_instance_groups = local.control_plane_instance_groups
ip_address = google_compute_global_address.loadbalancer_ip.self_link
port = local.ports_join
frontend_labels = merge(local.labels, { constellation-use = "join" })
}
module "loadbalancer_debugd" {
count = var.debug ? 1 : 0 // only deploy debugd in debug mode
source = "./modules/loadbalancer"
name = local.name
health_check = "TCP"
backend_port_name = "debugd"
backend_instance_groups = local.control_plane_instance_groups
ip_address = google_compute_global_address.loadbalancer_ip.self_link
port = local.ports_debugd
frontend_labels = merge(local.labels, { constellation-use = "debugd" })
frontend_labels = merge(local.labels, { constellation-use = each.value.name })
}
moved {
from = module.instance_group_control_plane
to = module.instance_group["control_plane_default"]
from = module.loadbalancer_boot
to = module.loadbalancer_public["bootstrapper"]
}
moved {
from = module.instance_group_worker
to = module.instance_group["worker_default"]
from = module.loadbalancer_kube
to = module.loadbalancer_public["kubernetes"]
}
moved {
from = module.loadbalancer_verify
to = module.loadbalancer_public["verify"]
}
moved {
from = module.loadbalancer_konnectivity
to = module.loadbalancer_public["konnectivity"]
}
moved {
from = module.loadbalancer_recovery
to = module.loadbalancer_public["recovery"]
}
moved {
from = module.loadbalancer_join
to = module.loadbalancer_public["join"]
}
moved {
from = module.loadbalancer_debugd[0]
to = module.loadbalancer_public["debugd"]
}
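
For reference, the comprehension driving loadbalancer_public turns the port list into a map keyed by name, and those keys are the stable instance addresses the moved blocks point at. Evaluated against the list in this file, it yields approximately:

# { for port in local.control_plane_named_ports : port.name => port }
{
  bootstrapper = { name = "bootstrapper", port = "9000", health_check = "TCP" }
  kubernetes   = { name = "kubernetes",   port = "6443", health_check = "HTTPS" }
  # ...one entry per remaining port; "debugd" appears only when var.debug is true
}

After terraform init -upgrade, a plan against existing state should report these instances as moved rather than as destroy-and-create pairs.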