Support internal load balancers (#2388)

* arch: support internal lb on Azure

* arch: support internal lb on GCP

* helm: remove lb svc from verify deployment

* arch: support internal lb on AWS

* terraform: add jump hosts for internal lb

* cli: expose internalLoadBalancer in config

* ci: add e2e-manual-internal

* add in-cluster endpoint to terraform output
3u13r 2023-10-17 15:46:15 +02:00, committed by GitHub
commit 0c89f57ac5 (parent fe7e16e1cc)
46 changed files with 1310 additions and 412 deletions
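The diffs below touch the Terraform client (Go), the per-CSP Terraform modules (AWS, Azure, GCP), and the CLI variable plumbing. For orientation, the internalLoadBalancer option travels roughly as follows; a minimal Go sketch with assumed names, not code from this commit:

package sketch

// Sketch only: the CLI exposes internalLoadBalancer in the user-facing config
// and maps it onto the internal_load_balancer Terraform variable that this
// commit adds to every *ClusterVariables struct (see variables.go below).
type config struct {
	InternalLoadBalancer bool `yaml:"internalLoadBalancer"`
}

func terraformVars(cfg config) map[string]any {
	return map[string]any{"internal_load_balancer": cfg.InternalLoadBalancer}
}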

cli/internal/terraform/BUILD.bazel

@ -73,6 +73,17 @@ go_library(
"terraform/iam/aws/.terraform.lock.hcl",
"terraform/iam/azure/.terraform.lock.hcl",
"terraform/iam/gcp/.terraform.lock.hcl",
"terraform/gcp/modules/internal_load_balancer/main.tf",
"terraform/gcp/modules/internal_load_balancer/variables.tf",
"terraform/gcp/modules/jump_host/main.tf",
"terraform/gcp/modules/jump_host/outputs.tf",
"terraform/gcp/modules/jump_host/variables.tf",
"terraform/aws/modules/jump_host/main.tf",
"terraform/aws/modules/jump_host/output.tf",
"terraform/aws/modules/jump_host/variables.tf",
"terraform/azure/modules/jump_host/main.tf",
"terraform/azure/modules/jump_host/variables.tf",
"terraform/azure/modules/jump_host/outputs.tf",
],
importpath = "github.com/edgelesssys/constellation/v2/cli/internal/terraform",
visibility = ["//cli:__subpackages__"],

cli/internal/terraform/terraform.go

@ -181,11 +181,20 @@ func (c *Client) ShowInfrastructure(ctx context.Context, provider cloudprovider.
return state.Infrastructure{}, errors.New("terraform show: no values returned")
}
- ipOutput, ok := tfState.Values.Outputs["ip"]
+ outOfClusterEndpointOutput, ok := tfState.Values.Outputs["out_of_cluster_endpoint"]
if !ok {
- return state.Infrastructure{}, errors.New("no IP output found")
+ return state.Infrastructure{}, errors.New("no out_of_cluster_endpoint output found")
}
- ip, ok := ipOutput.Value.(string)
+ outOfClusterEndpoint, ok := outOfClusterEndpointOutput.Value.(string)
if !ok {
return state.Infrastructure{}, errors.New("invalid type in IP output: not a string")
}
+ inClusterEndpointOutput, ok := tfState.Values.Outputs["in_cluster_endpoint"]
+ if !ok {
+ return state.Infrastructure{}, errors.New("no in_cluster_endpoint output found")
+ }
+ inClusterEndpoint, ok := inClusterEndpointOutput.Value.(string)
+ if !ok {
+ return state.Infrastructure{}, errors.New("invalid type in in_cluster_endpoint output: not a string")
+ }
@ -231,7 +240,8 @@ func (c *Client) ShowInfrastructure(ctx context.Context, provider cloudprovider.
}
res := state.Infrastructure{
- ClusterEndpoint: ip,
+ ClusterEndpoint: outOfClusterEndpoint,
+ InClusterEndpoint: inClusterEndpoint,
APIServerCertSANs: apiServerCertSANs,
InitSecret: []byte(secret),
UID: uid,
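The lookup-and-type-assert pattern above now repeats per output. A hypothetical helper, not part of this commit, that would fold the pattern and keep the error messages uniform:

package terraform

import (
	"fmt"

	tfjson "github.com/hashicorp/terraform-json"
)

// getStringOutput mirrors the lookups in ShowInfrastructure above.
func getStringOutput(outputs map[string]*tfjson.StateOutput, key string) (string, error) {
	output, ok := outputs[key]
	if !ok {
		return "", fmt.Errorf("no %s output found", key)
	}
	str, ok := output.Value.(string)
	if !ok {
		return "", fmt.Errorf("invalid type in %s output: not a string", key)
	}
	return str, nil
}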

cli/internal/terraform/terraform/aws/main.tf

@ -51,6 +51,9 @@ locals {
tags = {
constellation-uid = local.uid,
}
in_cluster_endpoint = aws_lb.front_end.dns_name
out_of_cluster_endpoint = var.internal_load_balancer && var.debug ? module.jump_host[0].ip : local.in_cluster_endpoint
}
resource "random_id" "uid" {
@ -84,14 +87,14 @@ resource "aws_eip" "lb" {
# in a future version to support all availability zones in the chosen region
# This should only be done after we migrated to DNS-based addressing for the
# control-plane.
- for_each = toset([var.zone])
+ for_each = var.internal_load_balancer ? [] : toset([var.zone])
domain = "vpc"
tags = merge(local.tags, { "constellation-ip-endpoint" = each.key == var.zone ? "legacy-primary-zone" : "additional-zone" })
}
resource "aws_lb" "front_end" {
name = "${local.name}-loadbalancer"
- internal = false
+ internal = var.internal_load_balancer
load_balancer_type = "network"
tags = local.tags
security_groups = [aws_security_group.security_group.id]
@ -106,7 +109,7 @@ resource "aws_lb" "front_end" {
for_each = toset([var.zone])
content {
subnet_id = module.public_private_subnet.public_subnet_id[subnet_mapping.key]
- allocation_id = aws_eip.lb[subnet_mapping.key].id
+ allocation_id = var.internal_load_balancer ? "" : aws_eip.lb[subnet_mapping.key].id
}
}
enable_cross_zone_load_balancing = true
@ -206,6 +209,17 @@ module "instance_group" {
)
}
module "jump_host" {
count = var.internal_load_balancer && var.debug ? 1 : 0
source = "./modules/jump_host"
base_name = local.name
subnet_id = module.public_private_subnet.public_subnet_id[var.zone]
lb_internal_ip = aws_lb.front_end.dns_name
ports = [for port in local.load_balancer_ports : port.port]
iam_instance_profile = var.iam_instance_profile_worker_nodes
security_group_id = aws_security_group.security_group.id
}
# TODO(3u13r): Remove once 2.12 is released
moved {
from = module.load_balancer_target_konnectivity
@ -241,3 +255,4 @@ moved {
from = module.load_balancer_target_bootstrapper
to = module.load_balancer_targets["bootstrapper"]
}
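The two new locals at the top of this file encode the endpoint split: in_cluster_endpoint is always the load balancer address, while out_of_cluster_endpoint only differs in debug mode, where the jump host provides the sole externally reachable path into the VPC. The same decision as a Go sketch (names invented for illustration):

package sketch

// outOfClusterEndpoint mirrors the ternary in the Terraform locals above.
func outOfClusterEndpoint(internalLB, debug bool, jumpHostIP, inClusterEndpoint string) string {
	if internalLB && debug {
		return jumpHostIP // only the jump host is reachable from outside the VPC
	}
	// With a public LB this address is reachable from anywhere; with an
	// internal LB and no jump host there is deliberately no outside path.
	return inClusterEndpoint
}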

cli/internal/terraform/terraform/aws/modules/jump_host/main.tf

@ -0,0 +1,59 @@
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "5.17.0"
}
}
}
data "aws_ami" "ubuntu" {
most_recent = true
owners = ["099720109477"] # Canonical
filter {
name = "name"
values = ["ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-*"]
}
}
resource "aws_instance" "jump_host" {
ami = data.aws_ami.ubuntu.id
instance_type = "c5a.large"
associate_public_ip_address = true
iam_instance_profile = var.iam_instance_profile
subnet_id = var.subnet_id
security_groups = [var.security_group_id]
tags = {
"Name" = "${var.base_name}-jump-host"
}
user_data = <<EOF
#!/bin/bash
set -x
# Uncomment to create user with password
# useradd -m user
# usermod -aG sudo user
# usermod --shell /bin/bash user
# sh -c "echo \"user:pass\" | chpasswd"
sysctl -w net.ipv4.ip_forward=1
sysctl -p
internal_ip=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
lb_ip=${var.lb_internal_ip}
if [[ ! $${lb_ip} =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
lb_ip=$(dig +short ${var.lb_internal_ip})
fi
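# Note: on AWS the LB address is an NLB DNS name, hence the dig resolution above.
# The rules below DNAT inbound traffic to the internal LB and SNAT it to this
# host's address so that return traffic flows back through the jump host.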
%{for port in var.ports~}
iptables -t nat -A PREROUTING -p tcp --dport ${port} -j DNAT --to-destination $${lb_ip}:${port}
iptables -t nat -A POSTROUTING -p tcp -d $${lb_ip} --dport ${port} -j SNAT --to-source $${internal_ip}
%{endfor~}
EOF
}

cli/internal/terraform/terraform/aws/modules/jump_host/output.tf

@ -0,0 +1,3 @@
output "ip" {
value = aws_instance.jump_host.public_ip
}

cli/internal/terraform/terraform/aws/modules/jump_host/variables.tf

@ -0,0 +1,29 @@
variable "base_name" {
description = "Base name of the jump host"
type = string
}
variable "subnet_id" {
description = "Subnet ID to deploy the jump host into"
type = string
}
variable "lb_internal_ip" {
description = "Internal IP of the load balancer"
type = string
}
variable "iam_instance_profile" {
description = "IAM instance profile to attach to the jump host"
type = string
}
variable "ports" {
description = "Ports to forward to the load balancer"
type = list(number)
}
variable "security_group_id" {
description = "Security group to attach to the jump host"
type = string
}

cli/internal/terraform/terraform/aws/modules/load_balancer_target/main.tf

@ -8,11 +8,12 @@ terraform {
}
resource "aws_lb_target_group" "front_end" {
name = var.name
port = var.port
protocol = "TCP"
vpc_id = var.vpc_id
tags = var.tags
+ preserve_client_ip = "false"
health_check {
port = var.port

cli/internal/terraform/terraform/aws/outputs.tf

@ -1,9 +1,22 @@
output "ip" {
value = aws_eip.lb[var.zone].public_ip
output "out_of_cluster_endpoint" {
value = local.out_of_cluster_endpoint
}
output "in_cluster_endpoint" {
value = local.in_cluster_endpoint
}
output "api_server_cert_sans" {
- value = sort(concat([aws_eip.lb[var.zone].public_ip, local.wildcard_lb_dns_name], var.custom_endpoint == "" ? [] : [var.custom_endpoint]))
+ value = sort(
+ distinct(
+ concat(
+ [
+ local.in_cluster_endpoint,
+ local.out_of_cluster_endpoint,
+ ],
+ var.custom_endpoint == "" ? [] : [var.custom_endpoint],
+ )
+ )
+ )
}
output "uid" {

cli/internal/terraform/terraform/aws/variables.tf

@ -69,3 +69,9 @@ variable "custom_endpoint" {
default = ""
description = "Custom endpoint to use for the Kubernetes apiserver. If not set, the default endpoint will be used."
}
variable "internal_load_balancer" {
type = bool
default = false
description = "Use an internal load balancer."
}

cli/internal/terraform/terraform/azure/.terraform.lock.hcl

@ -60,3 +60,26 @@ provider "registry.terraform.io/hashicorp/random" {
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}
provider "registry.terraform.io/hashicorp/tls" {
version = "4.0.4"
hashes = [
"h1:GZcFizg5ZT2VrpwvxGBHQ/hO9r6g0vYdQqx3bFD3anY=",
"h1:Wd3RqmQW60k2QWPN4sK5CtjGuO1d+CRNXgC+D4rKtXc=",
"h1:bNsvpX5EGuVxgGRXBQVLXlmq40PdoLp8Rfuh1ZmV7yY=",
"h1:pe9vq86dZZKCm+8k1RhzARwENslF3SXb9ErHbQfgjXU=",
"h1:rKKMyIEBZwR+8j6Tx3PwqBrStuH+J+pxcbCR5XN8WAw=",
"zh:23671ed83e1fcf79745534841e10291bbf34046b27d6e68a5d0aab77206f4a55",
"zh:45292421211ffd9e8e3eb3655677700e3c5047f71d8f7650d2ce30242335f848",
"zh:59fedb519f4433c0fdb1d58b27c210b27415fddd0cd73c5312530b4309c088be",
"zh:5a8eec2409a9ff7cd0758a9d818c74bcba92a240e6c5e54b99df68fff312bbd5",
"zh:5e6a4b39f3171f53292ab88058a59e64825f2b842760a4869e64dc1dc093d1fe",
"zh:810547d0bf9311d21c81cc306126d3547e7bd3f194fc295836acf164b9f8424e",
"zh:824a5f3617624243bed0259d7dd37d76017097dc3193dac669be342b90b2ab48",
"zh:9361ccc7048be5dcbc2fafe2d8216939765b3160bd52734f7a9fd917a39ecbd8",
"zh:aa02ea625aaf672e649296bce7580f62d724268189fe9ad7c1b36bb0fa12fa60",
"zh:c71b4cd40d6ec7815dfeefd57d88bc592c0c42f5e5858dcc88245d371b4b8b1e",
"zh:dabcd52f36b43d250a3d71ad7abfa07b5622c69068d989e60b79b2bb4f220316",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}

cli/internal/terraform/terraform/azure/main.tf

@ -40,12 +40,15 @@ locals {
])
// wildcard_lb_dns_name is the DNS name of the load balancer with a wildcard for the name.
// example: given "name-1234567890.location.cloudapp.azure.com" it will return "*.location.cloudapp.azure.com"
- wildcard_lb_dns_name = replace(data.azurerm_public_ip.loadbalancer_ip.fqdn, "/^[^.]*\\./", "*.")
+ wildcard_lb_dns_name = var.internal_load_balancer ? "" : replace(data.azurerm_public_ip.loadbalancer_ip[0].fqdn, "/^[^.]*\\./", "*.")
// deduce from format (subscriptions)/$ID/resourceGroups/$RG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/$NAME"
// move from the right as to ignore the optional prefixes
uai_resource_group = element(split("/", var.user_assigned_identity), length(split("/", var.user_assigned_identity)) - 5)
// deduce as above
uai_name = element(split("/", var.user_assigned_identity), length(split("/", var.user_assigned_identity)) - 1)
in_cluster_endpoint = var.internal_load_balancer ? azurerm_lb.loadbalancer.frontend_ip_configuration[0].private_ip_address : azurerm_public_ip.loadbalancer_ip[0].ip_address
out_of_cluster_endpoint = var.debug && var.internal_load_balancer ? module.jump_host[0].ip : local.in_cluster_endpoint
}
resource "random_id" "uid" {
@ -84,6 +87,7 @@ resource "azurerm_application_insights" "insights" {
}
resource "azurerm_public_ip" "loadbalancer_ip" {
count = var.internal_load_balancer ? 0 : 1
name = "${local.name}-lb"
domain_name_label = local.name
resource_group_name = var.resource_group
@ -104,6 +108,7 @@ resource "azurerm_public_ip" "loadbalancer_ip" {
// resources for clusters created before 2.9. In those cases we need to wait until loadbalancer_ip has
// been updated before reading from it.
data "azurerm_public_ip" "loadbalancer_ip" {
count = var.internal_load_balancer ? 0 : 1
name = "${local.name}-lb"
resource_group_name = var.resource_group
depends_on = [azurerm_public_ip.loadbalancer_ip]
@ -143,9 +148,21 @@ resource "azurerm_lb" "loadbalancer" {
sku = "Standard"
tags = local.tags
- frontend_ip_configuration {
- name = "PublicIPAddress"
- public_ip_address_id = azurerm_public_ip.loadbalancer_ip.id
+ dynamic "frontend_ip_configuration" {
+ for_each = var.internal_load_balancer ? [] : [1]
+ content {
+ name = "PublicIPAddress"
+ public_ip_address_id = azurerm_public_ip.loadbalancer_ip[0].id
+ }
+ }
+ dynamic "frontend_ip_configuration" {
+ for_each = var.internal_load_balancer ? [1] : []
+ content {
+ name = "PrivateIPAddress"
+ private_ip_address_allocation = "Dynamic"
+ subnet_id = azurerm_subnet.loadbalancer_subnet[0].id
+ }
+ }
}
@ -180,6 +197,14 @@ resource "azurerm_virtual_network" "network" {
tags = local.tags
}
resource "azurerm_subnet" "loadbalancer_subnet" {
count = var.internal_load_balancer ? 1 : 0
name = "${local.name}-lb"
resource_group_name = var.resource_group
virtual_network_name = azurerm_virtual_network.network.name
address_prefixes = ["10.10.0.0/16"]
}
resource "azurerm_subnet" "node_subnet" {
name = "${local.name}-node"
resource_group_name = var.resource_group
@ -246,6 +271,17 @@ module "scale_set_group" {
]
}
module "jump_host" {
count = var.internal_load_balancer && var.debug ? 1 : 0
source = "./modules/jump_host"
base_name = local.name
resource_group = var.resource_group
location = var.location
subnet_id = azurerm_subnet.loadbalancer_subnet[0].id
ports = [for port in local.ports : port.port]
lb_internal_ip = azurerm_lb.loadbalancer.frontend_ip_configuration[0].private_ip_address
}
data "azurerm_subscription" "current" {
}

cli/internal/terraform/terraform/azure/modules/jump_host/main.tf

@ -0,0 +1,85 @@
resource "azurerm_linux_virtual_machine" "jump_host" {
name = "${var.base_name}-jump-host"
resource_group_name = var.resource_group
location = var.location
size = "Standard_D2as_v5"
network_interface_ids = [
azurerm_network_interface.jump_host.id,
]
admin_username = "adminuser"
admin_ssh_key {
username = "adminuser"
public_key = tls_private_key.ssh_key.public_key_openssh
}
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "Canonical"
offer = "0001-com-ubuntu-server-jammy"
sku = "22_04-lts-gen2"
version = "latest"
}
boot_diagnostics {
}
user_data = base64encode(<<EOF
#!/bin/bash
set -x
# Uncomment to create user with password
# useradd -m user
# usermod -aG sudo user
# usermod --shell /bin/bash user
# sh -c "echo \"user:pass\" | chpasswd"
sysctl -w net.ipv4.ip_forward=1
sysctl -p
internal_ip=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
lb_ip=${var.lb_internal_ip}
if [[ ! $${lb_ip} =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
lb_ip=$(dig +short ${var.lb_internal_ip})
fi
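# Same scheme as the AWS jump host: DNAT inbound traffic to the internal LB,
# SNAT it to this host's address so replies return through the jump host.
# (Here lb_internal_ip is a literal IP, so the dig fallback above is a safety net.)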
%{for port in var.ports~}
iptables -t nat -A PREROUTING -p tcp --dport ${port} -j DNAT --to-destination $${lb_ip}:${port}
iptables -t nat -A POSTROUTING -p tcp -d $${lb_ip} --dport ${port} -j SNAT --to-source $${internal_ip}
%{endfor~}
EOF
)
}
resource "azurerm_network_interface" "jump_host" {
name = "${var.base_name}-jump-host"
resource_group_name = var.resource_group
location = var.location
ip_configuration {
name = "public"
subnet_id = var.subnet_id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.jump_host.id
}
}
resource "azurerm_public_ip" "jump_host" {
name = "${var.base_name}-jump-host"
resource_group_name = var.resource_group
location = var.location
allocation_method = "Dynamic"
}
resource "tls_private_key" "ssh_key" {
algorithm = "RSA"
rsa_bits = 4096
}

cli/internal/terraform/terraform/azure/modules/jump_host/outputs.tf

@ -0,0 +1,3 @@
output "ip" {
value = azurerm_linux_virtual_machine.jump_host.public_ip_address
}

cli/internal/terraform/terraform/azure/modules/jump_host/variables.tf

@ -0,0 +1,29 @@
variable "base_name" {
description = "Base name of the jump host"
type = string
}
variable "ports" {
description = "Ports to forward to the load balancer"
type = list(number)
}
variable "resource_group" {
description = "Resource group name to deploy the jump host into"
type = string
}
variable "location" {
description = "Location to deploy the jump host into"
type = string
}
variable "subnet_id" {
description = "Subnet ID to deploy the jump host into"
type = string
}
variable "lb_internal_ip" {
description = "Internal IP of the load balancer"
type = string
}

cli/internal/terraform/terraform/azure/outputs.tf

@ -1,9 +1,24 @@
output "ip" {
value = azurerm_public_ip.loadbalancer_ip.ip_address
output "out_of_cluster_endpoint" {
value = local.out_of_cluster_endpoint
}
output "in_cluster_endpoint" {
value = local.in_cluster_endpoint
}
output "api_server_cert_sans" {
- value = sort(concat([azurerm_public_ip.loadbalancer_ip.ip_address, local.wildcard_lb_dns_name], var.custom_endpoint == "" ? [] : [var.custom_endpoint]))
+ value = sort(
+ distinct(
+ concat(
+ [
+ local.in_cluster_endpoint,
+ local.out_of_cluster_endpoint,
+ ],
+ var.custom_endpoint == "" ? [] : [var.custom_endpoint],
+ var.internal_load_balancer ? [] : [local.wildcard_lb_dns_name],
+ )
+ )
+ )
}
output "uid" {

cli/internal/terraform/terraform/azure/variables.tf

@ -67,3 +67,9 @@ variable "custom_endpoint" {
default = ""
description = "Custom endpoint to use for the Kubernetes apiserver. If not set, the default endpoint will be used."
}
variable "internal_load_balancer" {
type = bool
default = false
description = "Whether to use an internal load balancer for the Constellation."
}

cli/internal/terraform/terraform/gcp/main.tf

@ -57,6 +57,8 @@ locals {
control_plane_instance_groups = [
for control_plane in local.node_groups_by_role["control-plane"] : module.instance_group[control_plane].instance_group
]
in_cluster_endpoint = var.internal_load_balancer ? google_compute_address.loadbalancer_ip_internal[0].address : google_compute_global_address.loadbalancer_ip[0].address
out_of_cluster_endpoint = var.debug && var.internal_load_balancer ? module.jump_host[0].ip : local.in_cluster_endpoint
}
resource "random_id" "uid" {
@ -89,6 +91,26 @@ resource "google_compute_subnetwork" "vpc_subnetwork" {
]
}
resource "google_compute_subnetwork" "proxy_subnet" {
count = var.internal_load_balancer ? 1 : 0
name = "${local.name}-proxy"
ip_cidr_range = local.cidr_vpc_subnet_proxy
region = var.region
purpose = "REGIONAL_MANAGED_PROXY"
role = "ACTIVE"
network = google_compute_network.vpc_network.id
}
resource "google_compute_subnetwork" "ilb_subnet" {
count = var.internal_load_balancer ? 1 : 0
name = "${local.name}-ilb"
ip_cidr_range = local.cidr_vpc_subnet_ilb
region = var.region
network = google_compute_network.vpc_network.id
depends_on = [google_compute_subnetwork.proxy_subnet]
}
resource "google_compute_router" "vpc_router" {
name = local.name
description = "Constellation VPC router"
@ -114,6 +136,7 @@ resource "google_compute_firewall" "firewall_external" {
ports = flatten([
[for port in local.control_plane_named_ports : port.port],
[local.ports_node_range],
var.internal_load_balancer ? [22] : [],
])
}
@ -168,23 +191,59 @@ module "instance_group" {
custom_endpoint = var.custom_endpoint
}
resource "google_compute_address" "loadbalancer_ip_internal" {
count = var.internal_load_balancer ? 1 : 0
name = local.name
region = var.region
subnetwork = google_compute_subnetwork.ilb_subnet[0].id
purpose = "SHARED_LOADBALANCER_VIP"
address_type = "INTERNAL"
}
resource "google_compute_global_address" "loadbalancer_ip" {
- name = local.name
+ count = var.internal_load_balancer ? 0 : 1
+ name = local.name
}
module "loadbalancer_public" {
// one public load balancer per control-plane port; skipped entirely when the internal LB is enabled
- for_each = { for port in local.control_plane_named_ports : port.name => port }
+ for_each = var.internal_load_balancer ? {} : { for port in local.control_plane_named_ports : port.name => port }
source = "./modules/loadbalancer"
name = local.name
backend_port_name = each.value.name
port = each.value.port
health_check = each.value.health_check
backend_instance_groups = local.control_plane_instance_groups
- ip_address = google_compute_global_address.loadbalancer_ip.self_link
+ ip_address = google_compute_global_address.loadbalancer_ip[0].self_link
frontend_labels = merge(local.labels, { constellation-use = each.value.name })
}
module "loadbalancer_internal" {
for_each = var.internal_load_balancer ? { for port in local.control_plane_named_ports : port.name => port } : {}
source = "./modules/internal_load_balancer"
name = local.name
backend_port_name = each.value.name
port = each.value.port
health_check = each.value.health_check
backend_instance_group = local.control_plane_instance_groups[0]
ip_address = google_compute_address.loadbalancer_ip_internal[0].self_link
frontend_labels = merge(local.labels, { constellation-use = each.value.name })
region = var.region
network = google_compute_network.vpc_network.id
backend_subnet = google_compute_subnetwork.ilb_subnet[0].id
}
module "jump_host" {
count = var.internal_load_balancer && var.debug ? 1 : 0
source = "./modules/jump_host"
base_name = local.name
zone = var.zone
subnetwork = google_compute_subnetwork.vpc_subnetwork.id
labels = local.labels
lb_internal_ip = google_compute_address.loadbalancer_ip_internal[0].address
ports = [for port in local.control_plane_named_ports : port.port]
}
moved {
from = module.loadbalancer_boot
to = module.loadbalancer_public["bootstrapper"]
@ -210,11 +269,6 @@ moved {
to = module.loadbalancer_public["recovery"]
}
- moved {
- from = module.loadbalancer_join
- to = module.loadbalancer_public["join"]
- }
moved {
from = module.loadbalancer_debugd[0]
to = module.loadbalancer_public["debugd"]

cli/internal/terraform/terraform/gcp/modules/internal_load_balancer/main.tf

@ -0,0 +1,72 @@
terraform {
required_providers {
google = {
source = "hashicorp/google"
version = "4.83.0"
}
}
}
locals {
name = "${var.name}-${var.backend_port_name}"
}
resource "google_compute_region_health_check" "health" {
name = local.name
region = var.region
check_interval_sec = 1
timeout_sec = 1
dynamic "tcp_health_check" {
for_each = var.health_check == "TCP" ? [1] : []
content {
port = var.port
}
}
dynamic "https_health_check" {
for_each = var.health_check == "HTTPS" ? [1] : []
content {
host = ""
port = var.port
request_path = "/readyz"
}
}
}
resource "google_compute_region_backend_service" "backend" {
name = local.name
protocol = "TCP"
load_balancing_scheme = "INTERNAL_MANAGED"
health_checks = [google_compute_region_health_check.health.id]
port_name = var.backend_port_name
timeout_sec = 240
region = var.region
backend {
group = var.backend_instance_group
balancing_mode = "UTILIZATION"
capacity_scaler = 1.0
}
}
resource "google_compute_region_target_tcp_proxy" "proxy" {
name = local.name
region = var.region
backend_service = google_compute_region_backend_service.backend.id
}
# forwarding rule
resource "google_compute_forwarding_rule" "forwarding" {
name = local.name
network = var.network
subnetwork = var.backend_subnet
region = var.region
ip_address = var.ip_address
ip_protocol = "TCP"
load_balancing_scheme = "INTERNAL_MANAGED"
port_range = var.port
allow_global_access = true
target = google_compute_region_target_tcp_proxy.proxy.id
labels = var.frontend_labels
}

cli/internal/terraform/terraform/gcp/modules/internal_load_balancer/variables.tf

@ -0,0 +1,54 @@
variable "name" {
type = string
description = "Base name of the load balancer."
}
variable "region" {
type = string
description = "The region where the load balancer will be created."
}
variable "network" {
type = string
description = "The network to which all network resources will be attached."
}
variable "backend_subnet" {
type = string
description = "The subnet to which all backend network resources will be attached."
}
variable "health_check" {
type = string
description = "The type of the health check. 'HTTPS' or 'TCP'."
validation {
condition = contains(["HTTPS", "TCP"], var.health_check)
error_message = "Health check must be either 'HTTPS' or 'TCP'."
}
}
variable "port" {
type = string
description = "The port on which to listen for incoming traffic."
}
variable "backend_port_name" {
type = string
description = "Name of backend port. The same name should appear in the instance groups referenced by this service."
}
variable "backend_instance_group" {
type = string
description = "The URL of the instance group resource from which the load balancer will direct traffic."
}
variable "ip_address" {
type = string
description = "The IP address that this forwarding rule serves."
}
variable "frontend_labels" {
type = map(string)
default = {}
description = "Labels to apply to the forwarding rule."
}

cli/internal/terraform/terraform/gcp/modules/jump_host/main.tf

@ -0,0 +1,73 @@
terraform {
required_providers {
google = {
source = "hashicorp/google"
version = "4.83.0"
}
google-beta = {
source = "hashicorp/google-beta"
version = "4.83.0"
}
}
}
data "google_compute_image" "image_ubuntu" {
family = "ubuntu-2204-lts"
project = "ubuntu-os-cloud"
}
resource "google_compute_instance" "vm_instance" {
name = "${var.base_name}-jumphost"
machine_type = "n2d-standard-4"
zone = var.zone
boot_disk {
initialize_params {
image = data.google_compute_image.image_ubuntu.self_link
}
}
network_interface {
subnetwork = var.subnetwork
access_config {
}
}
service_account {
scopes = ["compute-ro"]
}
labels = var.labels
metadata = {
serial-port-enable = "TRUE"
}
metadata_startup_script = <<EOF
#!/bin/bash
set -x
# Uncomment to create user with password
# useradd -m user
# usermod -aG sudo user
# usermod --shell /bin/bash user
# sh -c "echo \"user:pass\" | chpasswd"
sysctl -w net.ipv4.ip_forward=1
sysctl -p
internal_ip=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
lb_ip=${var.lb_internal_ip}
if [[ ! $${lb_ip} =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
lb_ip=$(dig +short ${var.lb_internal_ip})
fi
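# On GCP lb_internal_ip is a literal IP from google_compute_address, so the dig
# fallback above never fires; the rules below therefore use the variable directly.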
%{for port in var.ports~}
iptables -t nat -A PREROUTING -p tcp --dport ${port} -j DNAT --to-destination ${var.lb_internal_ip}:${port}
iptables -t nat -A POSTROUTING -p tcp -d ${var.lb_internal_ip} --dport ${port} -j SNAT --to-source $${internal_ip}
%{endfor~}
EOF
}

cli/internal/terraform/terraform/gcp/modules/jump_host/outputs.tf

@ -0,0 +1,3 @@
output "ip" {
value = google_compute_instance.vm_instance.network_interface[0].access_config[0].nat_ip
}

cli/internal/terraform/terraform/gcp/modules/jump_host/variables.tf

@ -0,0 +1,30 @@
variable "base_name" {
type = string
description = "Base name of the instance group."
}
variable "labels" {
type = map(string)
default = {}
description = "Labels to apply to the instance group."
}
variable "subnetwork" {
type = string
description = "Name of the subnetwork to use."
}
variable "zone" {
type = string
description = "Zone to deploy the instance group in."
}
variable "lb_internal_ip" {
type = string
description = "Internal IP of the load balancer."
}
variable "ports" {
type = list(number)
description = "Ports to forward to the load balancer."
}

cli/internal/terraform/terraform/gcp/outputs.tf

@ -1,13 +1,22 @@
output "ip" {
value = google_compute_global_address.loadbalancer_ip.address
output "out_of_cluster_endpoint" {
value = local.out_of_cluster_endpoint
}
output "in_cluster_endpoint" {
value = local.in_cluster_endpoint
}
output "api_server_cert_sans" {
- value = sort(concat([google_compute_global_address.loadbalancer_ip.address], var.custom_endpoint == "" ? [] : [var.custom_endpoint]))
- }
- output "fallback_endpoint" {
- value = google_compute_global_address.loadbalancer_ip.address
+ value = sort(
+ distinct(
+ concat(
+ [
+ local.in_cluster_endpoint,
+ local.out_of_cluster_endpoint,
+ ],
+ var.custom_endpoint == "" ? [] : [var.custom_endpoint],
+ )
+ )
+ )
}
output "uid" {

cli/internal/terraform/terraform/gcp/variables.tf

@ -51,3 +51,9 @@ variable "custom_endpoint" {
default = ""
description = "Custom endpoint to use for the Kubernetes apiserver. If not set, the default endpoint will be used."
}
variable "internal_load_balancer" {
type = bool
default = false
description = "Enable internal load balancer. This can only be enabled if the control-plane is deployed in one zone."
}

cli/internal/terraform/terraform_test.go

@ -212,9 +212,12 @@ func TestCreateCluster(t *testing.T) {
workingState := tfjson.State{
Values: &tfjson.StateValues{
Outputs: map[string]*tfjson.StateOutput{
"ip": {
"out_of_cluster_endpoint": {
Value: "192.0.2.100",
},
"in_cluster_endpoint": {
Value: "192.0.2.101",
},
"initSecret": {
Value: "initSecret",
},
@ -236,9 +239,12 @@ func TestCreateCluster(t *testing.T) {
workingState := tfjson.State{
Values: &tfjson.StateValues{
Outputs: map[string]*tfjson.StateOutput{
"ip": {
"out_of_cluster_endpoint": {
Value: "192.0.2.100",
},
"in_cluster_endpoint": {
Value: "192.0.2.101",
},
"initSecret": {
Value: "initSecret",
},
@ -480,6 +486,7 @@ func TestCreateCluster(t *testing.T) {
assert.Equal("192.0.2.100", infraState.ClusterEndpoint)
assert.Equal(state.HexBytes("initSecret"), infraState.InitSecret)
assert.Equal("12345abc", infraState.UID)
assert.Equal("192.0.2.101", infraState.InClusterEndpoint)
if tc.provider == cloudprovider.Azure {
assert.Equal(tc.expectedAttestationURL, infraState.Azure.AttestationURL)
}

cli/internal/terraform/variables.go

@ -66,6 +66,8 @@ type AWSClusterVariables struct {
NodeGroups map[string]AWSNodeGroup `hcl:"node_groups" cty:"node_groups"`
// CustomEndpoint is the (optional) custom dns hostname for the kubernetes api server.
CustomEndpoint string `hcl:"custom_endpoint" cty:"custom_endpoint"`
// InternalLoadBalancer is true if an internal load balancer should be created.
InternalLoadBalancer bool `hcl:"internal_load_balancer" cty:"internal_load_balancer"`
}
// GetCreateMAA gets the CreateMAA variable.
@ -131,6 +133,8 @@ type GCPClusterVariables struct {
NodeGroups map[string]GCPNodeGroup `hcl:"node_groups" cty:"node_groups"`
// CustomEndpoint is the (optional) custom dns hostname for the kubernetes api server.
CustomEndpoint string `hcl:"custom_endpoint" cty:"custom_endpoint"`
// InternalLoadBalancer is true if an internal load balancer should be created.
InternalLoadBalancer bool `hcl:"internal_load_balancer" cty:"internal_load_balancer"`
}
// GetCreateMAA gets the CreateMAA variable.
@ -203,6 +207,8 @@ type AzureClusterVariables struct {
NodeGroups map[string]AzureNodeGroup `hcl:"node_groups" cty:"node_groups"`
// CustomEndpoint is the (optional) custom dns hostname for the kubernetes api server.
CustomEndpoint string `hcl:"custom_endpoint" cty:"custom_endpoint"`
// InternalLoadBalancer is true if an internal load balancer should be created.
InternalLoadBalancer bool `hcl:"internal_load_balancer" cty:"internal_load_balancer"`
}
// GetCreateMAA gets the CreateMAA variable.
@ -275,6 +281,8 @@ type OpenStackClusterVariables struct {
Debug bool `hcl:"debug" cty:"debug"`
// CustomEndpoint is the (optional) custom dns hostname for the kubernetes api server.
CustomEndpoint string `hcl:"custom_endpoint" cty:"custom_endpoint"`
// InternalLoadBalancer is true if an internal load balancer should be created.
InternalLoadBalancer bool `hcl:"internal_load_balancer" cty:"internal_load_balancer"`
}
// GetCreateMAA gets the CreateMAA variable.
@ -346,6 +354,8 @@ type QEMUVariables struct {
KernelCmdline *string `hcl:"constellation_cmdline" cty:"constellation_cmdline"`
// CustomEndpoint is the (optional) custom dns hostname for the kubernetes api server.
CustomEndpoint string `hcl:"custom_endpoint" cty:"custom_endpoint"`
// InternalLoadBalancer is true if an internal load balancer should be created.
InternalLoadBalancer bool `hcl:"internal_load_balancer" cty:"internal_load_balancer"`
}
// GetCreateMAA gets the CreateMAA variable.

cli/internal/terraform/variables_test.go

@ -73,7 +73,8 @@ node_groups = {
zone = "eu-central-1c"
}
}
custom_endpoint = "example.com"
+ internal_load_balancer = false
`
got := vars.String()
assert.Equal(t, want, got)
@ -147,7 +148,8 @@ node_groups = {
zone = "eu-central-1b"
}
}
custom_endpoint = "example.com"
+ internal_load_balancer = false
`
got := vars.String()
assert.Equal(t, want, got)
@ -212,7 +214,8 @@ node_groups = {
zones = null
}
}
custom_endpoint = "example.com"
+ internal_load_balancer = false
`
got := vars.String()
assert.Equal(t, want, got)
@ -279,6 +282,7 @@ openstack_username = "my-username"
openstack_password = "my-password"
debug = true
custom_endpoint = "example.com"
internal_load_balancer = false
`
got := vars.String()
assert.Equal(t, want, got)
@ -333,6 +337,7 @@ nvram = "/usr/share/OVMF/OVMF_VARS.fd"
constellation_initrd = "/var/lib/libvirt/images/cluster-name-initrd"
constellation_cmdline = "console=ttyS0,115200n8"
custom_endpoint = "example.com"
internal_load_balancer = false
`
got := vars.String()
assert.Equal(t, want, got)
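As the expected strings above show, the new field serializes through the structs' hcl tags. A self-contained sketch of that mechanism with a toy struct, assuming the String() implementations build on hclwrite/gohcl:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

// demoVariables carries the same tag shape as the *ClusterVariables structs.
type demoVariables struct {
	CustomEndpoint       string `hcl:"custom_endpoint" cty:"custom_endpoint"`
	InternalLoadBalancer bool   `hcl:"internal_load_balancer" cty:"internal_load_balancer"`
}

func main() {
	f := hclwrite.NewEmptyFile()
	gohcl.EncodeIntoBody(&demoVariables{InternalLoadBalancer: true}, f.Body())
	// Attributes come out aligned, matching the expected test strings:
	//   custom_endpoint        = ""
	//   internal_load_balancer = true
	fmt.Print(string(f.Bytes()))
}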