terraform: Terraform module for AWS (#2503)

This commit is contained in:
Adrian Stobbe 2023-11-08 19:10:01 +01:00 committed by GitHub
parent 0bac72261d
commit cea6204b37
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
94 changed files with 912 additions and 87 deletions

View file

@ -0,0 +1,85 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
# Pin for the azurerm provider; version/constraints match the module's
# required_providers block. Hashes cover all supported platforms.
provider "registry.terraform.io/hashicorp/azurerm" {
version = "3.74.0"
constraints = "3.74.0"
hashes = [
"h1:1kSiowd/tBNswp3iv7ePlzkP5llWihjHcY3pdXdJqVU=",
"h1:4b15khHtc5OkIVEFg0W5QRwf/ov1WVQkXVdSiAcTCS8=",
"h1:ETVZfmulZQ435+lgFCkZRpfVOLyAxfDOwbPXFg3aLLQ=",
"h1:H3diAufZ5VDQKsQNYykVRaFTOUJ4gjFiT2VLYi574+w=",
"h1:LEdK8BxNSNiBQbtcJhQZKMMHDjmPpUsvDpr3Mzs93Tg=",
"h1:OtJKZcMwrRNR84ylT1GgMwGR8KTxVOCkNifbjABlGj0=",
"h1:Rq+CNb+4u47dw20tlAeI2yxSOuDtLm+S/GZO2pneLyA=",
"h1:VfBB00BE0wvFiod7BlL+Cn6r2599MEi94hnAQ277ux8=",
"h1:YJ15rwD0G7lYc9OVh5GO4VTqcd2jhqegfgyqTJH1M/I=",
"h1:YvxxiqiwXjZdU53u3b9q49ezsIAb59KmdLLFkwkwFAs=",
"h1:xDRmcV40KrWttPYg/w0/IN/frS9K1twuyvqRNVZko44=",
"zh:0424c70152f949da1ec52ba96d20e5fd32fd22d9bd9203ce045d5f6aab3d20fc",
"zh:16dbf581d10f8e7937185bcdcceb4f91d08c919e452fb8da7580071288c8c397",
"zh:3019103bc2c3b4e185f5c65696c349697644c968f5c085af5505fed6d01c4241",
"zh:49bb56ebaed6653fdb913c2b2bb74fc8b5399e7258d1e89084f72c44ea1130dd",
"zh:85547666517f899d88620bd23a000a8f43c7dc93587c350eb1ea17bcb3e645c7",
"zh:8bed8b646ff1822d8764de68b56b71e5dd971a4b77eba80d47f400a530800bea",
"zh:8bfa6c70c004ba05ebce47f74f49ce872c28a68a18bb71b281a9681bcbbdbfa1",
"zh:a2ae9e38fda0695fb8aa810e4f1ce4b104bfda651a87923b307bb1728680d8b6",
"zh:beac1efe32f99072c892095f5ff46e40d6852b66679a03bc3acbe1b90fb1f653",
"zh:d8a6ca20e49ebe7ea5688d91233d571e2c2ccc3e41000c39a7d7031df209ea8e",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
"zh:f937b5fdf49b072c0347408d0a1c5a5d822dae1a23252915930e5a82d1d8ce8b",
]
}
# Pin for the random provider; version/constraints match required_providers.
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
constraints = "3.5.1"
hashes = [
"h1:0ULxM8/DscMzfiDWg1yclBf/39U44wQmlx745BfYZ80=",
"h1:3hjTP5tQBspPcFAJlfafnWrNrKnr7J4Cp0qB9jbqf30=",
"h1:6FVyQ/aG6tawPam6B+oFjgdidKd83uG9n7dOSQ66HBA=",
"h1:6ePAACdONiMGe1j5pwUc0gpDFt82y/ka0zRimMg/geM=",
"h1:BD3Y4CcrGHb9sx+Bl5V8M2PSyw23mykzXSwj+/6FhHA=",
"h1:HGeb7Tajn7HZwX0MhrdyL57LoCSz5GMcI2wbHs12D4U=",
"h1:IL9mSatmwov+e0+++YX2V6uel+dV6bn+fC/cnGDK3Ck=",
"h1:JiENkIxSWc32/2Dtd1n4CWY3ow/PHvAeGhdgcOLpWZM=",
"h1:MROYZuKGTuaTNf2FgbwCgSVpteQW25ubnb+Xfok2jvk=",
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
"h1:sZ7MTSD4FLekNN2wSNFGpM+5slfvpm5A/NLVZiB7CO0=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}
# Pin for the tls provider. No "constraints" line: this provider is not listed
# in required_providers here — presumably pulled in transitively by a submodule
# (the jump_host module uses tls_private_key); confirm before upgrading.
provider "registry.terraform.io/hashicorp/tls" {
version = "4.0.4"
hashes = [
"h1:GZcFizg5ZT2VrpwvxGBHQ/hO9r6g0vYdQqx3bFD3anY=",
"h1:Wd3RqmQW60k2QWPN4sK5CtjGuO1d+CRNXgC+D4rKtXc=",
"h1:bNsvpX5EGuVxgGRXBQVLXlmq40PdoLp8Rfuh1ZmV7yY=",
"h1:pe9vq86dZZKCm+8k1RhzARwENslF3SXb9ErHbQfgjXU=",
"h1:rKKMyIEBZwR+8j6Tx3PwqBrStuH+J+pxcbCR5XN8WAw=",
"zh:23671ed83e1fcf79745534841e10291bbf34046b27d6e68a5d0aab77206f4a55",
"zh:45292421211ffd9e8e3eb3655677700e3c5047f71d8f7650d2ce30242335f848",
"zh:59fedb519f4433c0fdb1d58b27c210b27415fddd0cd73c5312530b4309c088be",
"zh:5a8eec2409a9ff7cd0758a9d818c74bcba92a240e6c5e54b99df68fff312bbd5",
"zh:5e6a4b39f3171f53292ab88058a59e64825f2b842760a4869e64dc1dc093d1fe",
"zh:810547d0bf9311d21c81cc306126d3547e7bd3f194fc295836acf164b9f8424e",
"zh:824a5f3617624243bed0259d7dd37d76017097dc3193dac669be342b90b2ab48",
"zh:9361ccc7048be5dcbc2fafe2d8216939765b3160bd52734f7a9fd917a39ecbd8",
"zh:aa02ea625aaf672e649296bce7580f62d724268189fe9ad7c1b36bb0fa12fa60",
"zh:c71b4cd40d6ec7815dfeefd57d88bc592c0c42f5e5858dcc88245d371b4b8b1e",
"zh:dabcd52f36b43d250a3d71ad7abfa07b5622c69068d989e60b79b2bb4f220316",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}

View file

@ -0,0 +1,290 @@
# Terraform settings for the Azure Constellation module: pin exact provider
# versions so plans are reproducible (must match .terraform.lock.hcl).
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "3.74.0"
}
random = {
source = "hashicorp/random"
version = "3.5.1"
}
}
}
# Provider configuration. Deletion protection for non-empty resource groups is
# disabled — NOTE(review): presumably so `terraform destroy` can remove a
# resource group that still contains cluster resources; confirm this is intended.
provider "azurerm" {
features {
resource_group {
prevent_deletion_if_contains_resources = false
}
}
}
# Shared derived values for the module.
locals {
# Random per-cluster suffix (hex of random_id.uid) used to disambiguate names.
uid = random_id.uid.hex
name = "${var.name}-${local.uid}"
# bcrypt hash of the init secret; distributed to nodes via tags (see scale_set_group).
initSecretHash = random_password.initSecret.bcrypt_hash
tags = {
constellation-uid = local.uid,
}
# Kubernetes NodePort range, opened as a single NSG rule below.
ports_node_range = "30000-32767"
# NOTE(review): this CIDR does not match the actual node subnet below
# (azurerm_subnet.node_subnet uses 10.9.0.0/16) — it is only exported via the
# "ip_cidr_nodes" output; confirm which value consumers expect.
cidr_vpc_subnet_nodes = "192.168.178.0/24"
# Ports exposed through the load balancer and NSG. "priority" doubles as the
# NSG rule priority; "path" is only meaningful for Https health checks.
# The debugd port is appended only in debug mode (flatten drops the empty list).
ports = flatten([
{ name = "kubernetes", port = "6443", health_check_protocol = "Https", path = "/readyz", priority = 100 },
{ name = "bootstrapper", port = "9000", health_check_protocol = "Tcp", path = null, priority = 101 },
{ name = "verify", port = "30081", health_check_protocol = "Tcp", path = null, priority = 102 },
{ name = "konnectivity", port = "8132", health_check_protocol = "Tcp", path = null, priority = 103 },
{ name = "recovery", port = "9999", health_check_protocol = "Tcp", path = null, priority = 104 },
{ name = "join", port = "30090", health_check_protocol = "Tcp", path = null, priority = 105 },
var.debug ? [{ name = "debugd", port = "4000", health_check_protocol = "Tcp", path = null, priority = 106 }] : [],
])
// wildcard_lb_dns_name is the DNS name of the load balancer with a wildcard for the name.
// example: given "name-1234567890.location.cloudapp.azure.com" it will return "*.location.cloudapp.azure.com"
wildcard_lb_dns_name = var.internal_load_balancer ? "" : replace(data.azurerm_public_ip.loadbalancer_ip[0].fqdn, "/^[^.]*\\./", "*.")
// deduce from format (subscriptions)/$ID/resourceGroups/$RG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/$NAME"
// move from the right as to ignore the optional prefixes
uai_resource_group = element(split("/", var.user_assigned_identity), length(split("/", var.user_assigned_identity)) - 5)
// deduce as above
uai_name = element(split("/", var.user_assigned_identity), length(split("/", var.user_assigned_identity)) - 1)
# Endpoint seen from inside the VNet vs. from the operator's machine. With an
# internal LB in debug mode, the operator reaches the cluster via the jump host.
in_cluster_endpoint = var.internal_load_balancer ? azurerm_lb.loadbalancer.frontend_ip_configuration[0].private_ip_address : azurerm_public_ip.loadbalancer_ip[0].ip_address
out_of_cluster_endpoint = var.debug && var.internal_load_balancer ? module.jump_host[0].ip : local.in_cluster_endpoint
}
# 4-byte random cluster identifier; its hex form becomes local.uid.
resource "random_id" "uid" {
byte_length = 4
}
# Secret used to authenticate the first connection to the cluster
# (exported via the "initSecret" output; only its bcrypt hash is put into tags).
resource "random_password" "initSecret" {
length = 32
special = true
override_special = "_%@"
}
# Optional Microsoft Azure Attestation (MAA) provider, created only when
# var.create_maa is set; its URI is propagated to nodes via scale-set tags.
resource "azurerm_attestation_provider" "attestation_provider" {
count = var.create_maa ? 1 : 0
# name must be between 3 and 24 characters in length and use numbers and lower-case letters only.
name = format("constell%s", local.uid)
resource_group_name = var.resource_group
location = var.location
lifecycle {
# Attestation policies will be set automatically upon creation, even if not specified in the resource,
# while they aren't being incorporated into the Terraform state correctly.
# To prevent them from being set to null when applying an upgrade, ignore the changes until the issue
# is resolved by Azure.
# Related issue: https://github.com/hashicorp/terraform-provider-azurerm/issues/21998
ignore_changes = [open_enclave_policy_base64, sgx_enclave_policy_base64, tpm_policy_base64, sev_snp_policy_base64]
}
}
# Application Insights instance for the cluster.
# NOTE(review): nothing in this module wires it to other resources — presumably
# consumed by the nodes/agents out of band; confirm it is still needed.
resource "azurerm_application_insights" "insights" {
name = local.name
location = var.location
resource_group_name = var.resource_group
application_type = "other"
tags = local.tags
}
# Public frontend IP for the load balancer; skipped entirely when the cluster
# uses an internal load balancer.
resource "azurerm_public_ip" "loadbalancer_ip" {
count = var.internal_load_balancer ? 0 : 1
name = "${local.name}-lb"
domain_name_label = local.name
resource_group_name = var.resource_group
location = var.location
allocation_method = "Static"
sku = "Standard"
tags = local.tags
lifecycle {
# Keep names of IPs created by older versions of this module stable.
ignore_changes = [name]
}
}
// Reads data from the resource of the same name.
// Used to wait to the actual resource to become ready, before using data from that resource.
// Property "fqdn" only becomes available on azurerm_public_ip resources once domain_name_label is set.
// Since we are setting domain_name_label starting with 2.10 we need to migrate
// resources for clusters created before 2.9. In those cases we need to wait until loadbalancer_ip has
// been updated before reading from it.
data "azurerm_public_ip" "loadbalancer_ip" {
count = var.internal_load_balancer ? 0 : 1
name = "${local.name}-lb"
resource_group_name = var.resource_group
depends_on = [azurerm_public_ip.loadbalancer_ip]
}
# Static egress IP for the NAT gateway (nodes have no public IPs of their own).
resource "azurerm_public_ip" "nat_gateway_ip" {
name = "${local.name}-nat"
resource_group_name = var.resource_group
location = var.location
allocation_method = "Static"
sku = "Standard"
tags = local.tags
}
# NAT gateway providing outbound connectivity for the node subnet.
resource "azurerm_nat_gateway" "gateway" {
name = local.name
location = var.location
resource_group_name = var.resource_group
sku_name = "Standard"
idle_timeout_in_minutes = 10
}
# NOTE(review): the "example" labels below look like copy-paste leftovers, but
# renaming them would move their Terraform state addresses and force
# re-creation on existing clusters — left unchanged deliberately.
resource "azurerm_subnet_nat_gateway_association" "example" {
nat_gateway_id = azurerm_nat_gateway.gateway.id
subnet_id = azurerm_subnet.node_subnet.id
}
resource "azurerm_nat_gateway_public_ip_association" "example" {
nat_gateway_id = azurerm_nat_gateway.gateway.id
public_ip_address_id = azurerm_public_ip.nat_gateway_ip.id
}
# Cluster load balancer. Exactly one of the two dynamic frontend blocks is
# rendered, depending on var.internal_load_balancer — so
# frontend_ip_configuration[0] is always the single active frontend.
resource "azurerm_lb" "loadbalancer" {
name = local.name
location = var.location
resource_group_name = var.resource_group
sku = "Standard"
tags = local.tags
# Public frontend (default).
dynamic "frontend_ip_configuration" {
for_each = var.internal_load_balancer ? [] : [1]
content {
name = "PublicIPAddress"
public_ip_address_id = azurerm_public_ip.loadbalancer_ip[0].id
}
}
# Private frontend in the dedicated load-balancer subnet (internal mode).
dynamic "frontend_ip_configuration" {
for_each = var.internal_load_balancer ? [1] : []
content {
name = "PrivateIPAddress"
private_ip_address_allocation = "Dynamic"
subnet_id = azurerm_subnet.loadbalancer_subnet[0].id
}
}
}
# Backend pool + probes + rules for control-plane nodes (all cluster ports).
module "loadbalancer_backend_control_plane" {
source = "./modules/load_balancer_backend"
name = "${local.name}-control-plane"
loadbalancer_id = azurerm_lb.loadbalancer.id
frontend_ip_configuration_name = azurerm_lb.loadbalancer.frontend_ip_configuration[0].name
ports = local.ports
}
# Backend pool for worker nodes; no ports are forwarded to workers directly.
module "loadbalancer_backend_worker" {
source = "./modules/load_balancer_backend"
name = "${local.name}-worker"
loadbalancer_id = azurerm_lb.loadbalancer.id
frontend_ip_configuration_name = azurerm_lb.loadbalancer.frontend_ip_configuration[0].name
ports = []
}
# Pool containing every node (both roles are attached to it in scale_set_group).
# NOTE(review): uses var.name rather than local.name (no uid suffix) — confirm
# this is intentional, e.g. for backward compatibility with existing state.
resource "azurerm_lb_backend_address_pool" "all" {
loadbalancer_id = azurerm_lb.loadbalancer.id
name = "${var.name}-all"
}
# Virtual network for the cluster; subnets below carve out /16 ranges from it.
resource "azurerm_virtual_network" "network" {
name = local.name
resource_group_name = var.resource_group
location = var.location
address_space = ["10.0.0.0/8"]
tags = local.tags
}
# Subnet hosting the internal load balancer frontend (internal mode only).
resource "azurerm_subnet" "loadbalancer_subnet" {
count = var.internal_load_balancer ? 1 : 0
name = "${local.name}-lb"
resource_group_name = var.resource_group
virtual_network_name = azurerm_virtual_network.network.name
address_prefixes = ["10.10.0.0/16"]
}
# Subnet hosting all cluster nodes (scale sets attach their NICs here).
resource "azurerm_subnet" "node_subnet" {
name = "${local.name}-node"
resource_group_name = var.resource_group
virtual_network_name = azurerm_virtual_network.network.name
address_prefixes = ["10.9.0.0/16"]
}
# NSG opening inbound TCP for every cluster port plus the NodePort range.
# The appended "nodeports" entry only carries name/port/priority — that is fine
# because the rule body reads no other attributes of local.ports entries.
resource "azurerm_network_security_group" "security_group" {
name = local.name
location = var.location
resource_group_name = var.resource_group
tags = local.tags
dynamic "security_rule" {
for_each = concat(
local.ports,
[{ name = "nodeports", port = local.ports_node_range, priority = 200 }]
)
content {
name = security_rule.value.name
priority = security_rule.value.priority
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = security_rule.value.port
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
}
# One VM scale set per entry in var.node_groups. The init-secret hash and MAA
# URL are passed to the nodes as instance tags. Control-plane groups join the
# control-plane backend pool; workers join the worker pool; both join "all".
module "scale_set_group" {
source = "./modules/scale_set"
for_each = var.node_groups
base_name = local.name
node_group_name = each.key
role = each.value.role
zones = each.value.zones
tags = merge(
local.tags,
{ constellation-init-secret-hash = local.initSecretHash },
{ constellation-maa-url = var.create_maa ? azurerm_attestation_provider.attestation_provider[0].attestation_uri : "" },
)
initial_count = each.value.initial_count
state_disk_size = each.value.disk_size
state_disk_type = each.value.disk_type
location = var.location
instance_type = each.value.instance_type
confidential_vm = var.confidential_vm
secure_boot = var.secure_boot
resource_group = var.resource_group
user_assigned_identity = var.user_assigned_identity
image_id = var.image_id
network_security_group_id = azurerm_network_security_group.security_group.id
subnet_id = azurerm_subnet.node_subnet.id
backend_address_pool_ids = each.value.role == "control-plane" ? [
azurerm_lb_backend_address_pool.all.id,
module.loadbalancer_backend_control_plane.backendpool_id
] : [
azurerm_lb_backend_address_pool.all.id,
module.loadbalancer_backend_worker.backendpool_id
]
}
# Debug-only jump host for reaching an internal load balancer from outside the
# VNet; created only when both debug mode and internal LB are enabled.
module "jump_host" {
count = var.internal_load_balancer && var.debug ? 1 : 0
source = "./modules/jump_host"
base_name = local.name
resource_group = var.resource_group
location = var.location
subnet_id = azurerm_subnet.loadbalancer_subnet[0].id
ports = [for port in local.ports : port.port]
lb_internal_ip = azurerm_lb.loadbalancer.frontend_ip_configuration[0].private_ip_address
}
# Current subscription; exposed through the "subscription_id" output.
data "azurerm_subscription" "current" {
}
# Resolves the user-assigned identity by the name/RG parsed from its resource
# ID in locals; its client_id is exposed as a module output.
data "azurerm_user_assigned_identity" "uaid" {
name = local.uai_name
resource_group_name = local.uai_resource_group
}

View file

@ -0,0 +1,85 @@
# Jump host VM that DNATs the cluster ports to the internal load balancer.
# The user_data script is Terraform-templated: ${...} is interpolated by
# Terraform, $${...} escapes to a literal shell variable reference.
# Comments are deliberately kept outside the heredoc so the rendered
# cloud-init payload stays byte-identical.
resource "azurerm_linux_virtual_machine" "jump_host" {
name = "${var.base_name}-jump-host"
resource_group_name = var.resource_group
location = var.location
size = "Standard_D2as_v5"
network_interface_ids = [
azurerm_network_interface.jump_host.id,
]
admin_username = "adminuser"
# Login is via the generated SSH key only (see tls_private_key.ssh_key).
admin_ssh_key {
username = "adminuser"
public_key = tls_private_key.ssh_key.public_key_openssh
}
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "Canonical"
offer = "0001-com-ubuntu-server-jammy"
sku = "22_04-lts-gen2"
version = "latest"
}
boot_diagnostics {
}
# Script: enable IP forwarding, resolve the LB address if it is not already a
# literal IPv4, then install DNAT/SNAT iptables rules for every forwarded port.
user_data = base64encode(<<EOF
#!/bin/bash
set -x
# Uncomment to create user with password
# useradd -m user
# usermod -aG sudo user
# usermod --shell /bin/bash user
# sh -c "echo \"user:pass\" | chpasswd"
sysctl -w net.ipv4.ip_forward=1
sysctl -p
internal_ip=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
lb_ip=${var.lb_internal_ip}
if [[ ! $${lb_ip} =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
lb_ip=$(dig +short ${var.lb_internal_ip})
fi
%{for port in var.ports~}
iptables -t nat -A PREROUTING -p tcp --dport ${port} -j DNAT --to-destination $${lb_ip}:${port}
iptables -t nat -A POSTROUTING -p tcp -d $${lb_ip} --dport ${port} -j SNAT --to-source $${internal_ip}
%{endfor~}
EOF
)
}
# NIC for the jump host, placed in the load-balancer subnet with a public IP.
resource "azurerm_network_interface" "jump_host" {
name = "${var.base_name}-jump-host"
resource_group_name = var.resource_group
location = var.location
ip_configuration {
name = "public"
subnet_id = var.subnet_id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.jump_host.id
}
}
# Public entry IP for the jump host.
# NOTE(review): "Dynamic" allocation implies a Basic-SKU IP, which Azure has
# announced for retirement — consider Static/Standard in a follow-up (changes
# behavior, so not done here).
resource "azurerm_public_ip" "jump_host" {
name = "${var.base_name}-jump-host"
resource_group_name = var.resource_group
location = var.location
allocation_method = "Dynamic"
}
# Throwaway SSH keypair for the jump host's admin user.
resource "tls_private_key" "ssh_key" {
algorithm = "RSA"
rsa_bits = 4096
}

View file

@ -0,0 +1,3 @@
# Public IP of the jump host; consumed by the parent module as the
# out-of-cluster endpoint in internal-LB debug mode.
output "ip" {
  value       = azurerm_linux_virtual_machine.jump_host.public_ip_address
  description = "Public IP address of the jump host."
}

View file

@ -0,0 +1,29 @@
# Input variables of the jump_host module.
variable "base_name" {
description = "Base name of the jump host"
type = string
}
# Each listed port gets a DNAT/SNAT iptables rule pair on the jump host.
variable "ports" {
description = "Ports to forward to the load balancer"
type = list(number)
}
variable "resource_group" {
description = "Resource group name to deploy the jump host into"
type = string
}
variable "location" {
description = "Location to deploy the jump host into"
type = string
}
variable "subnet_id" {
description = "Subnet ID to deploy the jump host into"
type = string
}
# May be an IPv4 literal or a resolvable name; the user_data script handles both.
variable "lb_internal_ip" {
description = "Internal IP of the load balancer"
type = string
}

View file

@ -0,0 +1,38 @@
# load_balancer_backend module: backend pool plus one health probe and one LB
# rule per configured port.
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "3.74.0"
}
}
}
resource "azurerm_lb_backend_address_pool" "backend_pool" {
loadbalancer_id = var.loadbalancer_id
name = var.name
}
# One probe per port, keyed by port name. request_path is null for Tcp probes
# and only used for Https ones. 5s interval is tighter than the Azure default.
resource "azurerm_lb_probe" "health_probes" {
for_each = { for port in var.ports : port.name => port }
loadbalancer_id = var.loadbalancer_id
name = each.value.name
port = each.value.port
protocol = each.value.health_check_protocol
request_path = each.value.path
interval_in_seconds = 5
}
# One rule per probe (same name/port), always TCP, frontend port == backend
# port. Outbound SNAT is disabled because egress goes through the NAT gateway.
resource "azurerm_lb_rule" "rules" {
for_each = azurerm_lb_probe.health_probes
loadbalancer_id = var.loadbalancer_id
name = each.value.name
protocol = "Tcp"
frontend_port = each.value.port
backend_port = each.value.port
frontend_ip_configuration_name = var.frontend_ip_configuration_name
backend_address_pool_ids = [azurerm_lb_backend_address_pool.backend_pool.id]
probe_id = each.value.id
disable_outbound_snat = true
}

View file

@ -0,0 +1,4 @@
# ID of the backend address pool created by this module.
output "backendpool_id" {
  description = "The ID of the created backend pool."
  value       = azurerm_lb_backend_address_pool.backend_pool.id
}

View file

@ -0,0 +1,25 @@
# Input variables of the load_balancer_backend module.
# NOTE(review): this "name" is used as the backend pool name, though the
# description calls it the cluster base name — callers pass e.g.
# "<cluster>-control-plane"; consider clarifying the description.
variable "name" {
type = string
default = "constell"
description = "Base name of the cluster."
}
variable "frontend_ip_configuration_name" {
type = string
description = "The name of the frontend IP configuration to use for the load balancer."
}
variable "loadbalancer_id" {
type = string
description = "The ID of the load balancer to add the backend to."
}
variable "ports" {
type = list(object({
name = string
port = number
health_check_protocol = string
path = string
}))
description = "The ports to add to the backend. Protocol can be either 'Tcp' or 'Https'. Path is only used for 'Https' protocol and can otherwise be null."
}

View file

@ -0,0 +1,102 @@
# Provider pins for the scale_set module (kept in sync with the parent module).
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "3.74.0"
}
random = {
source = "hashicorp/random"
version = "3.5.1"
}
}
}
locals {
# Per-group tags: parent tags plus role and node-group identifiers.
tags = merge(
var.tags,
{ constellation-role = var.role },
{ constellation-node-group = var.node_group_name },
)
# Random suffix so multiple groups with the same role get distinct names.
group_uid = random_id.uid.hex
name = "${var.base_name}-${var.role}-${local.group_uid}"
}
resource "random_id" "uid" {
byte_length = 4
}
# Admin password for the scale set VMs (the azurerm API requires one when
# password authentication is enabled).
resource "random_password" "password" {
length = 16
min_lower = 1
min_upper = 1
min_numeric = 1
min_special = 1
}
# VM scale set for one node group. Confidential-VM clusters get an os_disk with
# VMGuestStateOnly encryption; exactly one of the two dynamic os_disk blocks is
# rendered. Name, instance count and image are managed externally after
# creation (see lifecycle.ignore_changes).
resource "azurerm_linux_virtual_machine_scale_set" "scale_set" {
name = local.name
resource_group_name = var.resource_group
location = var.location
sku = var.instance_type
instances = var.initial_count
admin_username = "adminuser"
# NOTE(review): the password only appears to satisfy the API requirement when
# disable_password_authentication = false — confirm nothing logs into nodes
# with it.
admin_password = random_password.password.result
overprovision = false
provision_vm_agent = false
vtpm_enabled = true
disable_password_authentication = false
upgrade_mode = "Manual"
secure_boot_enabled = var.secure_boot
source_image_id = var.image_id
tags = local.tags
zones = var.zones
identity {
type = "UserAssigned"
identity_ids = [var.user_assigned_identity]
}
boot_diagnostics {}
dynamic "os_disk" {
for_each = var.confidential_vm ? [1] : [] # if confidential_vm is true
content {
security_encryption_type = "VMGuestStateOnly"
caching = "ReadWrite"
storage_account_type = "Premium_LRS"
}
}
dynamic "os_disk" {
for_each = var.confidential_vm ? [] : [1] # else
content {
caching = "ReadWrite"
storage_account_type = "Premium_LRS"
}
}
# State disk holding the node's persistent data, always attached at LUN 0.
data_disk {
storage_account_type = var.state_disk_type
disk_size_gb = var.state_disk_size
caching = "ReadWrite"
lun = 0
}
network_interface {
name = "node-network"
primary = true
network_security_group_id = var.network_security_group_id
ip_configuration {
name = "node-network"
primary = true
subnet_id = var.subnet_id
load_balancer_backend_address_pool_ids = var.backend_address_pool_ids
}
}
lifecycle {
ignore_changes = [
name, # required. Allow legacy scale sets to keep their old names
instances, # required. autoscaling modifies the instance count externally
source_image_id, # required. update procedure modifies the image id externally
]
}
}

View file

@ -0,0 +1,98 @@
# Input variables of the scale_set module (part 1).
variable "base_name" {
type = string
description = "Base name of the instance group."
}
variable "node_group_name" {
type = string
description = "Constellation name for the node group (used for configuration and CSP-independent naming)."
}
variable "role" {
type = string
description = "The role of the instance group."
validation {
condition = contains(["control-plane", "worker"], var.role)
error_message = "The role has to be 'control-plane' or 'worker'."
}
}
variable "tags" {
type = map(string)
description = "Tags to include in the scale_set."
}
variable "zones" {
type = list(string)
description = "List of availability zones."
default = null
}
variable "initial_count" {
type = number
description = "The number of instances in this scale set."
}
variable "instance_type" {
type = string
description = "The Azure instance type to deploy."
}
variable "state_disk_size" {
type = number
default = 30
description = "The size of the state disk in GB."
}
variable "resource_group" {
type = string
description = "The name of the Azure resource group to create the Constellation cluster in."
}
variable "location" {
type = string
description = "The Azure location to deploy the cluster in."
}
variable "image_id" {
type = string
description = "The image to use for the cluster nodes."
}
# Fixes the typo "attache" in the description; wording now matches the parent
# module's variable of the same name.
variable "user_assigned_identity" {
  type        = string
  description = "The name of the user assigned identity to attach to the nodes of the cluster."
}
# Input variables of the scale_set module (part 2).
variable "state_disk_type" {
type = string
default = "Premium_LRS"
description = "The type of the state disk."
}
variable "network_security_group_id" {
type = string
description = "The ID of the network security group to use for the scale set."
}
variable "backend_address_pool_ids" {
type = list(string)
description = "The IDs of the backend address pools to use for the scale set."
}
variable "subnet_id" {
type = string
description = "The ID of the subnet to use for the scale set."
}
variable "confidential_vm" {
type = bool
default = true
description = "Whether to deploy the cluster nodes as confidential VMs."
}
variable "secure_boot" {
type = bool
default = false
description = "Whether to deploy the cluster nodes with secure boot."
}

View file

@ -0,0 +1,64 @@
# Outputs of the Azure Constellation module.
# NOTE(review): "initSecret" / "attestationURL" break the snake_case convention
# of the other outputs, but renaming would break consumers — kept as is.
output "out_of_cluster_endpoint" {
value = local.out_of_cluster_endpoint
}
output "in_cluster_endpoint" {
value = local.in_cluster_endpoint
}
# Sorted, de-duplicated SANs for the apiserver certificate: both endpoints,
# the optional custom endpoint, and (public LB only) the wildcard DNS name.
output "api_server_cert_sans" {
value = sort(
distinct(
concat(
[
local.in_cluster_endpoint,
local.out_of_cluster_endpoint,
],
var.custom_endpoint == "" ? [] : [var.custom_endpoint],
var.internal_load_balancer ? [] : [local.wildcard_lb_dns_name],
)
)
)
}
output "uid" {
value = local.uid
}
# Plaintext init secret; marked sensitive so it is not printed in plan output.
output "initSecret" {
value = random_password.initSecret.result
sensitive = true
}
# Empty string when no MAA provider was created.
output "attestationURL" {
value = var.create_maa ? azurerm_attestation_provider.attestation_provider[0].attestation_uri : ""
}
output "network_security_group_name" {
value = azurerm_network_security_group.security_group.name
}
output "loadbalancer_name" {
value = azurerm_lb.loadbalancer.name
}
output "user_assigned_identity_client_id" {
value = data.azurerm_user_assigned_identity.uaid.client_id
}
output "resource_group" {
value = var.resource_group
}
output "subscription_id" {
value = data.azurerm_subscription.current.subscription_id
}
output "name" {
value = local.name
}
# NOTE(review): exports local.cidr_vpc_subnet_nodes (192.168.178.0/24), which
# does not match the actual node subnet (10.9.0.0/16) — verify consumers.
output "ip_cidr_nodes" {
value = local.cidr_vpc_subnet_nodes
}

View file

@ -0,0 +1,75 @@
# Base name; local.name appends a random uid suffix to it.
variable "name" {
type = string
description = "Base name of the cluster."
}
variable "node_groups" {
  type = map(object({
    role          = string
    initial_count = optional(number)
    instance_type = string
    disk_size     = number
    disk_type     = string
    zones         = optional(list(string))
  }))
  description = "A map of node group names to node group configurations."
  validation {
    # Bug fix: the previous `can([for ... : group.role == "..." || ...])` was a
    # no-op — building a list of booleans never errors, so can() always
    # returned true and invalid roles were accepted. alltrue() actually
    # requires every group's role to be one of the allowed values, matching
    # the scale_set module's own role validation.
    condition = alltrue([
      for group in var.node_groups : contains(["control-plane", "worker"], group.role)
    ])
    error_message = "The role has to be 'control-plane' or 'worker'."
  }
}
variable "location" {
type = string
description = "The Azure location to deploy the cluster in."
}
variable "image_id" {
type = string
description = "The image to use for the cluster nodes."
}
variable "create_maa" {
type = bool
default = false
description = "Whether to create a Microsoft Azure attestation provider."
}
variable "debug" {
type = bool
default = false
description = "Enable debug mode. This opens up a debugd port that can be used to deploy a custom bootstrapper."
}
variable "confidential_vm" {
type = bool
default = true
description = "Whether to deploy the cluster nodes as confidential VMs."
}
variable "secure_boot" {
type = bool
default = false
description = "Whether to deploy the cluster nodes with secure boot."
}
variable "resource_group" {
type = string
description = "The name of the Azure resource group to create the Constellation cluster in."
}
# Full resource ID; locals parse the name and resource group out of it.
variable "user_assigned_identity" {
type = string
description = "The name of the user assigned identity to attach to the nodes of the cluster. Should be of format: /subscriptions/$ID/resourceGroups/$RG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/$NAME"
}
variable "custom_endpoint" {
type = string
default = ""
description = "Custom endpoint to use for the Kubernetes apiserver. If not set, the default endpoint will be used."
}
variable "internal_load_balancer" {
type = bool
default = false
description = "Whether to use an internal load balancer for the Constellation."
}