terraform: use single zone loadbalancer frontend on AWS (#1983)

This change is required to ensure we do not get TLS handshake errors when connecting to the Kubernetes API.
Currently, the certificates used by kube-apiserver pods contain a SAN field with the (single) public ip of the loadbalancer.
If we allowed multiple loadbalancer frontend IPs, we could encounter cases where the certificate is only valid for one public IP
while we try to connect to a different IP.
To prevent this, we consciously disable support for the multi-zone loadbalancer frontend on AWS for now.
This will be re-enabled in the future.
This commit is contained in:
Malte Poll 2023-06-30 16:56:31 +02:00 committed by GitHub
parent d95ddd01d3
commit 66f1333c31
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
3 changed files with 22 additions and 47 deletions

View file

@ -77,7 +77,11 @@ module "public_private_subnet" {
}
resource "aws_eip" "lb" {
for_each = toset(module.public_private_subnet.all_zones)
# TODO(malt3): use for_each = toset(module.public_private_subnet.all_zones)
# in a future version to support all availability zones in the chosen region
# This should only be done after we migrated to DNS-based addressing for the
# control-plane.
for_each = toset([var.zone])
domain = "vpc"
tags = local.tags
}
@ -92,9 +96,10 @@ resource "aws_lb" "front_end" {
# TODO(malt3): use for_each = toset(module.public_private_subnet.all_zones)
# in a future version to support all availability zones in the chosen region
# without needing to constantly replace the loadbalancer.
# This has to wait until the bootstrapper that we upgrade from (source version) can handle multiple AZs
# This has to wait until the bootstrapper that we upgrade from (source version) use
# DNS-based addressing for the control-plane.
# for_each = toset(module.public_private_subnet.all_zones)
for_each = toset(local.zones)
for_each = toset([var.zone])
content {
subnet_id = module.public_private_subnet.public_subnet_id[subnet_mapping.key]
allocation_id = aws_eip.lb[subnet_mapping.key].id
@ -267,6 +272,7 @@ module "instance_group" {
local.tags,
{ Name = local.name },
{ constellation-role = each.value.role },
{ constellation-node-group = each.key },
{ constellation-uid = local.uid },
{ constellation-init-secret-hash = local.initSecretHash },
{ "kubernetes.io/cluster/${local.name}" = "owned" }

View file

@ -15,7 +15,7 @@ locals {
# 0 => 192.168.176.0/24 (unused private subnet cidr)
# 1 => 192.168.177.0/24 (unused private subnet cidr)
legacy = 2 # => 192.168.178.0/24 (legacy private subnet)
a = 3 # => 192.168.178.1/24 (first newly created zonal private subnet)
a = 3 # => 192.168.179.0/24 (first newly created zonal private subnet)
b = 4
c = 5
d = 6