mirror of
https://github.com/edgelesssys/constellation.git
synced 2025-08-09 07:22:40 -04:00
terraform: Terraform module for AWS (#2503)
This commit is contained in:
parent
0bac72261d
commit
cea6204b37
94 changed files with 912 additions and 87 deletions
|
@ -9,82 +9,6 @@ go_library(
|
|||
"terraform.go",
|
||||
"variables.go",
|
||||
],
|
||||
embedsrcs = [
|
||||
"terraform/aws/.terraform.lock.hcl",
|
||||
"terraform/aws/main.tf",
|
||||
"terraform/aws/modules/instance_group/main.tf",
|
||||
"terraform/aws/modules/instance_group/variables.tf",
|
||||
"terraform/aws/modules/load_balancer_target/main.tf",
|
||||
"terraform/aws/modules/load_balancer_target/output.tf",
|
||||
"terraform/aws/modules/load_balancer_target/variables.tf",
|
||||
"terraform/aws/modules/public_private_subnet/main.tf",
|
||||
"terraform/aws/modules/public_private_subnet/output.tf",
|
||||
"terraform/aws/modules/public_private_subnet/variables.tf",
|
||||
"terraform/aws/outputs.tf",
|
||||
"terraform/aws/variables.tf",
|
||||
"terraform/azure/.terraform.lock.hcl",
|
||||
"terraform/azure/main.tf",
|
||||
"terraform/azure/modules/load_balancer_backend/main.tf",
|
||||
"terraform/azure/modules/load_balancer_backend/outputs.tf",
|
||||
"terraform/azure/modules/load_balancer_backend/variables.tf",
|
||||
"terraform/azure/modules/scale_set/main.tf",
|
||||
"terraform/azure/modules/scale_set/variables.tf",
|
||||
"terraform/azure/outputs.tf",
|
||||
"terraform/azure/variables.tf",
|
||||
"terraform/gcp/.terraform.lock.hcl",
|
||||
"terraform/gcp/main.tf",
|
||||
"terraform/gcp/modules/instance_group/main.tf",
|
||||
"terraform/gcp/modules/instance_group/outputs.tf",
|
||||
"terraform/gcp/modules/instance_group/variables.tf",
|
||||
"terraform/gcp/modules/loadbalancer/main.tf",
|
||||
"terraform/gcp/modules/loadbalancer/variables.tf",
|
||||
"terraform/gcp/outputs.tf",
|
||||
"terraform/gcp/variables.tf",
|
||||
"terraform/iam/aws/README.md",
|
||||
"terraform/iam/aws/main.tf",
|
||||
"terraform/iam/aws/outputs.tf",
|
||||
"terraform/iam/aws/variables.tf",
|
||||
"terraform/iam/azure/README.md",
|
||||
"terraform/iam/azure/main.tf",
|
||||
"terraform/iam/azure/outputs.tf",
|
||||
"terraform/iam/azure/variables.tf",
|
||||
"terraform/iam/gcp/README.md",
|
||||
"terraform/iam/gcp/main.tf",
|
||||
"terraform/iam/gcp/outputs.tf",
|
||||
"terraform/iam/gcp/variables.tf",
|
||||
"terraform/qemu/.terraform.lock.hcl",
|
||||
"terraform/qemu/main.tf",
|
||||
"terraform/qemu/modules/instance_group/domain.xsl",
|
||||
"terraform/qemu/modules/instance_group/main.tf",
|
||||
"terraform/qemu/modules/instance_group/outputs.tf",
|
||||
"terraform/qemu/modules/instance_group/variables.tf",
|
||||
"terraform/qemu/outputs.tf",
|
||||
"terraform/qemu/variables.tf",
|
||||
"terraform/openstack/.terraform.lock.hcl",
|
||||
"terraform/openstack/main.tf",
|
||||
"terraform/openstack/modules/instance_group/main.tf",
|
||||
"terraform/openstack/modules/instance_group/outputs.tf",
|
||||
"terraform/openstack/modules/instance_group/variables.tf",
|
||||
"terraform/openstack/modules/loadbalancer/main.tf",
|
||||
"terraform/openstack/modules/loadbalancer/variables.tf",
|
||||
"terraform/openstack/outputs.tf",
|
||||
"terraform/openstack/variables.tf",
|
||||
"terraform/qemu/modules/instance_group/tdx_domain.xsl",
|
||||
"terraform/iam/aws/.terraform.lock.hcl",
|
||||
"terraform/iam/azure/.terraform.lock.hcl",
|
||||
"terraform/iam/gcp/.terraform.lock.hcl",
|
||||
"terraform/gcp/modules/internal_load_balancer/main.tf",
|
||||
"terraform/gcp/modules/internal_load_balancer/variables.tf",
|
||||
"terraform/gcp/modules/jump_host/main.tf",
|
||||
"terraform/gcp/modules/jump_host/outputs.tf",
|
||||
"terraform/gcp/modules/jump_host/variables.tf",
|
||||
"terraform/aws/modules/jump_host/main.tf",
|
||||
"terraform/aws/modules/jump_host/output.tf",
|
||||
"terraform/aws/modules/jump_host/variables.tf",
|
||||
"terraform/azure/modules/jump_host/main.tf",
|
||||
"terraform/azure/modules/jump_host/variables.tf",
|
||||
"terraform/azure/modules/jump_host/outputs.tf",
|
||||
],
|
||||
importpath = "github.com/edgelesssys/constellation/v2/cli/internal/terraform",
|
||||
visibility = ["//cli:__subpackages__"],
|
||||
deps = [
|
||||
|
@ -92,6 +16,7 @@ go_library(
|
|||
"//internal/cloud/cloudprovider",
|
||||
"//internal/constants",
|
||||
"//internal/file",
|
||||
"//terraform",
|
||||
"@com_github_hashicorp_go_version//:go-version",
|
||||
"@com_github_hashicorp_hc_install//:hc-install",
|
||||
"@com_github_hashicorp_hc_install//fs",
|
||||
|
|
|
@ -7,22 +7,18 @@ SPDX-License-Identifier: AGPL-3.0-only
|
|||
package terraform
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"errors"
|
||||
"io/fs"
|
||||
slashpath "path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/edgelesssys/constellation/v2/terraform"
|
||||
|
||||
"github.com/edgelesssys/constellation/v2/internal/file"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
//go:embed terraform/*
|
||||
//go:embed terraform/*/.terraform.lock.hcl
|
||||
//go:embed terraform/iam/*/.terraform.lock.hcl
|
||||
var terraformFS embed.FS
|
||||
|
||||
// prepareWorkspace loads the embedded Terraform files,
|
||||
// and writes them into the workspace.
|
||||
func prepareWorkspace(rootDir string, fileHandler file.Handler, workingDir string) error {
|
||||
|
@ -32,7 +28,7 @@ func prepareWorkspace(rootDir string, fileHandler file.Handler, workingDir strin
|
|||
// terraformCopier copies the embedded Terraform files into the workspace.
|
||||
func terraformCopier(fileHandler file.Handler, rootDir, workingDir string) error {
|
||||
goEmbedRootDir := filepath.ToSlash(rootDir)
|
||||
return fs.WalkDir(terraformFS, goEmbedRootDir, func(path string, d fs.DirEntry, err error) error {
|
||||
return fs.WalkDir(terraform.Assets, goEmbedRootDir, func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -41,7 +37,7 @@ func terraformCopier(fileHandler file.Handler, rootDir, workingDir string) error
|
|||
}
|
||||
|
||||
goEmbedPath := filepath.ToSlash(path)
|
||||
content, err := terraformFS.ReadFile(goEmbedPath)
|
||||
content, err := terraform.Assets.ReadFile(goEmbedPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -1,68 +0,0 @@
|
|||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/aws" {
|
||||
version = "5.17.0"
|
||||
constraints = "5.17.0"
|
||||
hashes = [
|
||||
"h1:+riTtJ8Tqjd6js1SGim+926BtDuxy8Jn4F+xV8LXvvg=",
|
||||
"h1:7XJ6tsfZR1m2RTHkJHmp7FtNxz8JP5Y/7p89RPebcAY=",
|
||||
"h1:A/Z75kGeHrP3euYJv1OaGfTVy63NXIeUj1YBHg4TdO0=",
|
||||
"h1:GVzgP42qi2UlVUyPqizWhFyaA9SpxhmAnPqwLKVxBqM=",
|
||||
"h1:IOvWK6rZ2e8AubIWAfKzqI+9AcG+QNPcMOZlujhO840=",
|
||||
"h1:OJMhYliR4PFDrTtOPocwq4NfuYZVGmmxwInPmcIC1x0=",
|
||||
"h1:U+EDfeUqefebA1h7KyBMD1xH0h311LMi7wijPDPkC/0=",
|
||||
"h1:WxtQKHotfGqgEJrV3Flb0CWziKxDfOz2RZUAIT09Uss=",
|
||||
"h1:fKgoYBRyK55vJSChUHPptDUQuXqjfDjVKVJ11+scq64=",
|
||||
"h1:lTrdAde+ANuM0Cn+RLFE0sOl2iFoWK9It0dIqi+xkv8=",
|
||||
"h1:pHssdSGtZ9R9lk3IfQIy20SpFjjZdBzvybDsT/y2cQA=",
|
||||
"h1:pcDQYPgf/6OQCapDcRV/RTvToi9qXHFsY16rvZx3vsY=",
|
||||
"h1:rplvK7UGP2FuzM44t2eRX+QYYPC0aUIoKdi5XayRI8M=",
|
||||
"h1:ytz93JU2mhkKFYWj0V5TV5GMH0v6cIekk485rn9me6A=",
|
||||
"zh:0087b9dd2c9c638fd63e527e5b9b70988008e263d480a199f180efe5a4f070f0",
|
||||
"zh:0fd532a4fd03ddef11f0502ff9fe4343443e1ae805cb088825a71d6d48906ec7",
|
||||
"zh:16411e731100cd15f7e165f53c23be784b2c86c2fcfd34781e0642d17090d342",
|
||||
"zh:251d520927e77f091e2ec6302e921d839a2430ac541c6a461aed7c08fb5eae12",
|
||||
"zh:4919e69682dc2a8c32d44f6ebc038a52c9f40af9c61cb574b64e322800d6a794",
|
||||
"zh:5334c60759d5f76bdc51355d1a3ebcc451d4d20f632f5c73b6e55c52b5dc9e52",
|
||||
"zh:7341a2b7247572eba0d0486094a870b872967702ec0ac7af728c2df2c30af4e5",
|
||||
"zh:81d1b1cb2cac6b3922a05adab69543b678f344a01debd54500263700dad7a288",
|
||||
"zh:882bc8e15ef6d4020a07321ec4c056977c5c1d96934118032922561d29504d43",
|
||||
"zh:8cd4871ef2b03fd916de1a6dc7eb8a81a354c421177d4334a2e3308e50215e41",
|
||||
"zh:97e12fe6529b21298adf1046c5e20ac35d0569c836a6f385ff041e257e00cfd2",
|
||||
"zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
|
||||
"zh:9f5baf5d59b9f3cf5504d1fa975f10f27da3791896a9e18ece47c258bac17634",
|
||||
"zh:dffafba6731ac1db1c540bdbd6a8c878486b71de9d0ca1d23c5c00a6c3c14d80",
|
||||
"zh:fa7440c3c15a42fc5731444d324ced75407d417bfe3184661ae47d40a9718dce",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/random" {
|
||||
version = "3.5.1"
|
||||
constraints = "3.5.1"
|
||||
hashes = [
|
||||
"h1:0ULxM8/DscMzfiDWg1yclBf/39U44wQmlx745BfYZ80=",
|
||||
"h1:3hjTP5tQBspPcFAJlfafnWrNrKnr7J4Cp0qB9jbqf30=",
|
||||
"h1:6FVyQ/aG6tawPam6B+oFjgdidKd83uG9n7dOSQ66HBA=",
|
||||
"h1:6ePAACdONiMGe1j5pwUc0gpDFt82y/ka0zRimMg/geM=",
|
||||
"h1:BD3Y4CcrGHb9sx+Bl5V8M2PSyw23mykzXSwj+/6FhHA=",
|
||||
"h1:HGeb7Tajn7HZwX0MhrdyL57LoCSz5GMcI2wbHs12D4U=",
|
||||
"h1:IL9mSatmwov+e0+++YX2V6uel+dV6bn+fC/cnGDK3Ck=",
|
||||
"h1:JiENkIxSWc32/2Dtd1n4CWY3ow/PHvAeGhdgcOLpWZM=",
|
||||
"h1:MROYZuKGTuaTNf2FgbwCgSVpteQW25ubnb+Xfok2jvk=",
|
||||
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
|
||||
"h1:sZ7MTSD4FLekNN2wSNFGpM+5slfvpm5A/NLVZiB7CO0=",
|
||||
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
|
||||
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
|
||||
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
|
||||
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
|
||||
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
|
||||
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
|
||||
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
|
||||
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
|
||||
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
|
||||
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
|
||||
]
|
||||
}
|
|
@ -1,259 +0,0 @@
|
|||
terraform {
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = "5.17.0"
|
||||
}
|
||||
random = {
|
||||
source = "hashicorp/random"
|
||||
version = "3.5.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Configure the AWS Provider
|
||||
provider "aws" {
|
||||
region = var.region
|
||||
}
|
||||
|
||||
locals {
|
||||
uid = random_id.uid.hex
|
||||
name = "${var.name}-${local.uid}"
|
||||
initSecretHash = random_password.initSecret.bcrypt_hash
|
||||
cidr_vpc_subnet_nodes = "192.168.176.0/20"
|
||||
ports_node_range = "30000-32767"
|
||||
load_balancer_ports = flatten([
|
||||
{ name = "kubernetes", port = "6443", health_check = "HTTPS" },
|
||||
{ name = "bootstrapper", port = "9000", health_check = "TCP" },
|
||||
{ name = "verify", port = "30081", health_check = "TCP" },
|
||||
{ name = "konnectivity", port = "8132", health_check = "TCP" },
|
||||
{ name = "recovery", port = "9999", health_check = "TCP" },
|
||||
{ name = "join", port = "30090", health_check = "TCP" },
|
||||
var.debug ? [{ name = "debugd", port = "4000", health_check = "TCP" }] : [],
|
||||
])
|
||||
target_group_arns = {
|
||||
control-plane : [
|
||||
for port in local.load_balancer_ports : module.load_balancer_targets[port.name].target_group_arn
|
||||
]
|
||||
worker : []
|
||||
}
|
||||
iam_instance_profile = {
|
||||
control-plane : var.iam_instance_profile_control_plane
|
||||
worker : var.iam_instance_profile_worker_nodes
|
||||
}
|
||||
# zones are all availability zones that are used by the node groups
|
||||
zones = distinct(sort([
|
||||
for node_group in var.node_groups : node_group.zone
|
||||
]))
|
||||
// wildcard_lb_dns_name is the DNS name of the load balancer with a wildcard for the name.
|
||||
// example: given "name-1234567890.region.elb.amazonaws.com" it will return "*.region.elb.amazonaws.com"
|
||||
wildcard_lb_dns_name = replace(aws_lb.front_end.dns_name, "/^[^.]*\\./", "*.")
|
||||
|
||||
tags = {
|
||||
constellation-uid = local.uid,
|
||||
}
|
||||
|
||||
in_cluster_endpoint = aws_lb.front_end.dns_name
|
||||
out_of_cluster_endpoint = var.internal_load_balancer && var.debug ? module.jump_host[0].ip : local.in_cluster_endpoint
|
||||
}
|
||||
|
||||
resource "random_id" "uid" {
|
||||
byte_length = 4
|
||||
}
|
||||
|
||||
resource "random_password" "initSecret" {
|
||||
length = 32
|
||||
special = true
|
||||
override_special = "_%@"
|
||||
}
|
||||
|
||||
resource "aws_vpc" "vpc" {
|
||||
cidr_block = "192.168.0.0/16"
|
||||
tags = merge(local.tags, { Name = "${local.name}-vpc" })
|
||||
}
|
||||
|
||||
module "public_private_subnet" {
|
||||
source = "./modules/public_private_subnet"
|
||||
name = local.name
|
||||
vpc_id = aws_vpc.vpc.id
|
||||
cidr_vpc_subnet_nodes = local.cidr_vpc_subnet_nodes
|
||||
cidr_vpc_subnet_internet = "192.168.0.0/20"
|
||||
zone = var.zone
|
||||
zones = local.zones
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
resource "aws_eip" "lb" {
|
||||
# TODO(malt3): use for_each = toset(module.public_private_subnet.all_zones)
|
||||
# in a future version to support all availability zones in the chosen region
|
||||
# This should only be done after we migrated to DNS-based addressing for the
|
||||
# control-plane.
|
||||
for_each = var.internal_load_balancer ? [] : toset([var.zone])
|
||||
domain = "vpc"
|
||||
tags = merge(local.tags, { "constellation-ip-endpoint" = each.key == var.zone ? "legacy-primary-zone" : "additional-zone" })
|
||||
}
|
||||
|
||||
resource "aws_lb" "front_end" {
|
||||
name = "${local.name}-loadbalancer"
|
||||
internal = var.internal_load_balancer
|
||||
load_balancer_type = "network"
|
||||
tags = local.tags
|
||||
security_groups = [aws_security_group.security_group.id]
|
||||
|
||||
dynamic "subnet_mapping" {
|
||||
# TODO(malt3): use for_each = toset(module.public_private_subnet.all_zones)
|
||||
# in a future version to support all availability zones in the chosen region
|
||||
# without needing to constantly replace the loadbalancer.
|
||||
# This has to wait until the bootstrapper that we upgrade from (source version) use
|
||||
# DNS-based addressing for the control-plane.
|
||||
# for_each = toset(module.public_private_subnet.all_zones)
|
||||
for_each = toset([var.zone])
|
||||
content {
|
||||
subnet_id = module.public_private_subnet.public_subnet_id[subnet_mapping.key]
|
||||
allocation_id = var.internal_load_balancer ? "" : aws_eip.lb[subnet_mapping.key].id
|
||||
}
|
||||
}
|
||||
enable_cross_zone_load_balancing = true
|
||||
|
||||
lifecycle {
|
||||
ignore_changes = [security_groups]
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_security_group" "security_group" {
|
||||
name = local.name
|
||||
vpc_id = aws_vpc.vpc.id
|
||||
description = "Security group for ${local.name}"
|
||||
tags = local.tags
|
||||
|
||||
egress {
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
description = "Allow all outbound traffic"
|
||||
}
|
||||
|
||||
ingress {
|
||||
from_port = split("-", local.ports_node_range)[0]
|
||||
to_port = split("-", local.ports_node_range)[1]
|
||||
protocol = "tcp"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
description = "K8s node ports"
|
||||
}
|
||||
|
||||
dynamic "ingress" {
|
||||
for_each = local.load_balancer_ports
|
||||
content {
|
||||
description = ingress.value.name
|
||||
from_port = ingress.value.port
|
||||
to_port = ingress.value.port
|
||||
protocol = "tcp"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
}
|
||||
}
|
||||
|
||||
ingress {
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
cidr_blocks = [aws_vpc.vpc.cidr_block]
|
||||
description = "allow all internal"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resource "aws_cloudwatch_log_group" "log_group" {
|
||||
name = local.name
|
||||
retention_in_days = 30
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
module "load_balancer_targets" {
|
||||
for_each = { for port in local.load_balancer_ports : port.name => port }
|
||||
source = "./modules/load_balancer_target"
|
||||
name = "${local.name}-${each.value.name}"
|
||||
port = each.value.port
|
||||
healthcheck_protocol = each.value.health_check
|
||||
healthcheck_path = each.value.name == "kubernetes" ? "/readyz" : ""
|
||||
vpc_id = aws_vpc.vpc.id
|
||||
lb_arn = aws_lb.front_end.arn
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
module "instance_group" {
|
||||
source = "./modules/instance_group"
|
||||
for_each = var.node_groups
|
||||
base_name = local.name
|
||||
node_group_name = each.key
|
||||
role = each.value.role
|
||||
zone = each.value.zone
|
||||
uid = local.uid
|
||||
instance_type = each.value.instance_type
|
||||
initial_count = each.value.initial_count
|
||||
image_id = var.ami
|
||||
state_disk_type = each.value.disk_type
|
||||
state_disk_size = each.value.disk_size
|
||||
target_group_arns = local.target_group_arns[each.value.role]
|
||||
security_groups = [aws_security_group.security_group.id]
|
||||
subnetwork = module.public_private_subnet.private_subnet_id[each.value.zone]
|
||||
iam_instance_profile = local.iam_instance_profile[each.value.role]
|
||||
enable_snp = var.enable_snp
|
||||
tags = merge(
|
||||
local.tags,
|
||||
{ Name = "${local.name}-${each.value.role}" },
|
||||
{ constellation-role = each.value.role },
|
||||
{ constellation-node-group = each.key },
|
||||
{ constellation-uid = local.uid },
|
||||
{ constellation-init-secret-hash = local.initSecretHash },
|
||||
{ "kubernetes.io/cluster/${local.name}" = "owned" }
|
||||
)
|
||||
}
|
||||
|
||||
module "jump_host" {
|
||||
count = var.internal_load_balancer && var.debug ? 1 : 0
|
||||
source = "./modules/jump_host"
|
||||
base_name = local.name
|
||||
subnet_id = module.public_private_subnet.public_subnet_id[var.zone]
|
||||
lb_internal_ip = aws_lb.front_end.dns_name
|
||||
ports = [for port in local.load_balancer_ports : port.port]
|
||||
iam_instance_profile = var.iam_instance_profile_worker_nodes
|
||||
security_group_id = aws_security_group.security_group.id
|
||||
}
|
||||
|
||||
# TODO(31u3r): Remove once 2.12 is released
|
||||
moved {
|
||||
from = module.load_balancer_target_konnectivity
|
||||
to = module.load_balancer_targets["konnectivity"]
|
||||
}
|
||||
|
||||
moved {
|
||||
from = module.load_balancer_target_verify
|
||||
to = module.load_balancer_targets["verify"]
|
||||
}
|
||||
|
||||
moved {
|
||||
from = module.load_balancer_target_recovery
|
||||
to = module.load_balancer_targets["recovery"]
|
||||
}
|
||||
|
||||
moved {
|
||||
from = module.load_balancer_target_join
|
||||
to = module.load_balancer_targets["join"]
|
||||
}
|
||||
|
||||
moved {
|
||||
from = module.load_balancer_target_debugd[0]
|
||||
to = module.load_balancer_targets["debugd"]
|
||||
}
|
||||
|
||||
moved {
|
||||
from = module.load_balancer_target_kubernetes
|
||||
to = module.load_balancer_targets["kubernetes"]
|
||||
}
|
||||
|
||||
moved {
|
||||
from = module.load_balancer_target_bootstrapper
|
||||
to = module.load_balancer_targets["bootstrapper"]
|
||||
}
|
||||
|
|
@ -1,115 +0,0 @@
|
|||
terraform {
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = "5.17.0"
|
||||
}
|
||||
random = {
|
||||
source = "hashicorp/random"
|
||||
version = "3.5.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
locals {
|
||||
group_uid = random_id.uid.hex
|
||||
name = "${var.base_name}-${lower(var.role)}-${local.group_uid}"
|
||||
}
|
||||
|
||||
resource "random_id" "uid" {
|
||||
byte_length = 4
|
||||
}
|
||||
|
||||
resource "aws_launch_template" "launch_template" {
|
||||
name_prefix = local.name
|
||||
image_id = var.image_id
|
||||
instance_type = var.instance_type
|
||||
iam_instance_profile {
|
||||
name = var.iam_instance_profile
|
||||
}
|
||||
vpc_security_group_ids = var.security_groups
|
||||
metadata_options {
|
||||
http_endpoint = "enabled"
|
||||
http_tokens = "required"
|
||||
instance_metadata_tags = "disabled"
|
||||
http_put_response_hop_limit = 2
|
||||
}
|
||||
|
||||
block_device_mappings {
|
||||
device_name = "/dev/sdb"
|
||||
ebs {
|
||||
volume_size = var.state_disk_size
|
||||
volume_type = var.state_disk_type
|
||||
encrypted = true
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
|
||||
# See: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template#cpu-options
|
||||
cpu_options {
|
||||
# use "enabled" to enable SEV-SNP
|
||||
# use "disabled" to disable SEV-SNP (but still require SNP-capable hardware)
|
||||
# use null to leave the setting unset (allows non-SNP-capable hardware to be used)
|
||||
amd_sev_snp = var.enable_snp ? "enabled" : null
|
||||
# Disable SMT. We are already disabling it inside the image.
|
||||
# Disabling SMT only in the image, not in the Hypervisor creates problems.
|
||||
# Thus, also disable it in the Hypervisor.
|
||||
# TODO(derpsteb): reenable once AWS confirms it's safe to do so.
|
||||
# threads_per_core = 1
|
||||
# When setting threads_per_core we also have to set core_count.
|
||||
# For the currently supported SNP instance families (C6a, M6a, R6a) default_cores
|
||||
# equals the maximum number of available cores.
|
||||
# core_count = data.aws_ec2_instance_type.instance_data.default_cores
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
ignore_changes = [
|
||||
cpu_options, # required. we cannot change the CPU options of a launch template
|
||||
name_prefix, # required. Allow legacy scale sets to keep their old names
|
||||
default_version, # required. update procedure creates new versions of the launch template
|
||||
image_id, # required. update procedure modifies the image id externally
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_autoscaling_group" "autoscaling_group" {
|
||||
name = local.name
|
||||
launch_template {
|
||||
id = aws_launch_template.launch_template.id
|
||||
}
|
||||
min_size = 1
|
||||
max_size = 10
|
||||
desired_capacity = var.initial_count
|
||||
vpc_zone_identifier = [var.subnetwork]
|
||||
target_group_arns = var.target_group_arns
|
||||
|
||||
# TODO(msanft): Remove this (to have the 10m default) once AWS SEV-SNP boot problems are resolved.
|
||||
# Set a higher timeout for the ASG to fulfill the desired healthy capcity. Temporary workaround to
|
||||
# long boot times on SEV-SNP machines on AWS.
|
||||
wait_for_capacity_timeout = var.enable_snp ? "20m" : "10m"
|
||||
|
||||
dynamic "tag" {
|
||||
for_each = var.tags
|
||||
content {
|
||||
key = tag.key
|
||||
value = tag.value
|
||||
propagate_at_launch = true
|
||||
}
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
ignore_changes = [
|
||||
name, # required. Allow legacy scale sets to keep their old names
|
||||
launch_template.0.version, # required. update procedure creates new versions of the launch template
|
||||
min_size, # required. autoscaling modifies the instance count externally
|
||||
max_size, # required. autoscaling modifies the instance count externally
|
||||
desired_capacity, # required. autoscaling modifies the instance count externally
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
data "aws_ec2_instance_type" "instance_data" {
|
||||
instance_type = var.instance_type
|
||||
}
|
|
@ -1,84 +0,0 @@
|
|||
variable "base_name" {
|
||||
type = string
|
||||
description = "Base name of the instance group."
|
||||
}
|
||||
|
||||
variable "node_group_name" {
|
||||
type = string
|
||||
description = "Constellation name for the node group (used for configuration and CSP-independent naming)."
|
||||
}
|
||||
|
||||
variable "role" {
|
||||
type = string
|
||||
description = "The role of the instance group."
|
||||
validation {
|
||||
condition = contains(["control-plane", "worker"], var.role)
|
||||
error_message = "The role has to be 'control-plane' or 'worker'."
|
||||
}
|
||||
}
|
||||
|
||||
variable "uid" {
|
||||
type = string
|
||||
description = "UID of the cluster. This is used for tags."
|
||||
}
|
||||
|
||||
variable "instance_type" {
|
||||
type = string
|
||||
description = "Instance type for the nodes."
|
||||
}
|
||||
|
||||
variable "initial_count" {
|
||||
type = number
|
||||
description = "Number of instances in the instance group."
|
||||
}
|
||||
|
||||
variable "image_id" {
|
||||
type = string
|
||||
description = "Image ID for the nodes."
|
||||
}
|
||||
|
||||
variable "state_disk_type" {
|
||||
type = string
|
||||
description = "EBS disk type for the state disk of the nodes."
|
||||
}
|
||||
|
||||
variable "state_disk_size" {
|
||||
type = number
|
||||
description = "Disk size for the state disk of the nodes [GB]."
|
||||
}
|
||||
|
||||
variable "target_group_arns" {
|
||||
type = list(string)
|
||||
description = "ARN of the target group."
|
||||
}
|
||||
|
||||
variable "subnetwork" {
|
||||
type = string
|
||||
description = "Name of the subnetwork to use."
|
||||
}
|
||||
|
||||
variable "iam_instance_profile" {
|
||||
type = string
|
||||
description = "IAM instance profile for the nodes."
|
||||
}
|
||||
|
||||
variable "security_groups" {
|
||||
type = list(string)
|
||||
description = "List of IDs of the security groups for an instance."
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
type = map(string)
|
||||
description = "The tags to add to the instance group."
|
||||
}
|
||||
|
||||
variable "enable_snp" {
|
||||
type = bool
|
||||
default = true
|
||||
description = "Enable AMD SEV SNP. Setting this to true sets the cpu-option AmdSevSnp to enable."
|
||||
}
|
||||
|
||||
variable "zone" {
|
||||
type = string
|
||||
description = "Zone to deploy the instance group in."
|
||||
}
|
|
@ -1,59 +0,0 @@
|
|||
terraform {
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = "5.17.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
data "aws_ami" "ubuntu" {
|
||||
most_recent = true
|
||||
owners = ["099720109477"] # Canonical
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-*"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_instance" "jump_host" {
|
||||
ami = data.aws_ami.ubuntu.id
|
||||
instance_type = "c5a.large"
|
||||
associate_public_ip_address = true
|
||||
|
||||
iam_instance_profile = var.iam_instance_profile
|
||||
subnet_id = var.subnet_id
|
||||
security_groups = [var.security_group_id]
|
||||
|
||||
tags = {
|
||||
"Name" = "${var.base_name}-jump-host"
|
||||
}
|
||||
|
||||
user_data = <<EOF
|
||||
#!/bin/bash
|
||||
set -x
|
||||
|
||||
# Uncomment to create user with password
|
||||
# useradd -m user
|
||||
# usermod -aG sudo user
|
||||
# usermod --shell /bin/bash user
|
||||
# sh -c "echo \"user:pass\" | chpasswd"
|
||||
|
||||
sysctl -w net.ipv4.ip_forward=1
|
||||
sysctl -p
|
||||
|
||||
internal_ip=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')
|
||||
|
||||
lb_ip=${var.lb_internal_ip}
|
||||
if [[ ! $${lb_ip} =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
lb_ip=$(dig +short ${var.lb_internal_ip})
|
||||
fi
|
||||
%{for port in var.ports~}
|
||||
iptables -t nat -A PREROUTING -p tcp --dport ${port} -j DNAT --to-destination $${lb_ip}:${port}
|
||||
iptables -t nat -A POSTROUTING -p tcp -d $${lb_ip} --dport ${port} -j SNAT --to-source $${internal_ip}
|
||||
%{endfor~}
|
||||
EOF
|
||||
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
output "ip" {
|
||||
value = aws_instance.jump_host.public_ip
|
||||
}
|
|
@ -1,29 +0,0 @@
|
|||
variable "base_name" {
|
||||
description = "Base name of the jump host"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "subnet_id" {
|
||||
description = "Subnet ID to deploy the jump host into"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "lb_internal_ip" {
|
||||
description = "Internal IP of the load balancer"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "iam_instance_profile" {
|
||||
description = "IAM instance profile to attach to the jump host"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "ports" {
|
||||
description = "Ports to forward to the load balancer"
|
||||
type = list(number)
|
||||
}
|
||||
|
||||
variable "security_group_id" {
|
||||
description = "Security group to attach to the jump host"
|
||||
type = string
|
||||
}
|
|
@ -1,42 +0,0 @@
|
|||
terraform {
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = "5.17.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_lb_target_group" "front_end" {
|
||||
name = var.name
|
||||
port = var.port
|
||||
protocol = "TCP"
|
||||
vpc_id = var.vpc_id
|
||||
tags = var.tags
|
||||
preserve_client_ip = "false"
|
||||
|
||||
health_check {
|
||||
port = var.port
|
||||
protocol = var.healthcheck_protocol
|
||||
path = var.healthcheck_protocol == "HTTPS" ? var.healthcheck_path : null
|
||||
interval = 10
|
||||
healthy_threshold = 2
|
||||
unhealthy_threshold = 2
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_lb_listener" "front_end" {
|
||||
load_balancer_arn = var.lb_arn
|
||||
port = var.port
|
||||
protocol = "TCP"
|
||||
tags = var.tags
|
||||
|
||||
default_action {
|
||||
type = "forward"
|
||||
target_group_arn = aws_lb_target_group.front_end.arn
|
||||
}
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
output "target_group_arn" {
|
||||
value = aws_lb_target_group.front_end.arn
|
||||
}
|
|
@ -1,36 +0,0 @@
|
|||
variable "name" {
|
||||
type = string
|
||||
description = "Name of the load balancer target."
|
||||
}
|
||||
|
||||
variable "port" {
|
||||
type = string
|
||||
description = "Port of the load balancer target."
|
||||
}
|
||||
|
||||
variable "vpc_id" {
|
||||
type = string
|
||||
description = "ID of the VPC."
|
||||
}
|
||||
|
||||
variable "lb_arn" {
|
||||
type = string
|
||||
description = "ARN of the load balancer."
|
||||
}
|
||||
|
||||
variable "healthcheck_protocol" {
|
||||
type = string
|
||||
default = "TCP"
|
||||
description = "Type of the load balancer target."
|
||||
}
|
||||
|
||||
variable "healthcheck_path" {
|
||||
type = string
|
||||
default = ""
|
||||
description = "Path for health check."
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
type = map(string)
|
||||
description = "The tags to add to the loadbalancer."
|
||||
}
|
|
@ -1,120 +0,0 @@
|
|||
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "5.17.0"
    }
  }
}

locals {
  # az_number is a stable mapping of az suffix to a number used for calculating the subnet cidr
  az_number = {
    # we start counting at 2 to have the legacy subnet before the first newly created networks
    # the legacy subnet did not start at a /20 boundary
    # 0 => 192.168.176.0/24 (unused private subnet cidr)
    # 1 => 192.168.177.0/24 (unused private subnet cidr)
    legacy = 2 # => 192.168.178.0/24 (legacy private subnet)
    a      = 3 # => 192.168.179.0/24 (first newly created zonal private subnet)
    b      = 4
    c      = 5
    d      = 6
    e      = 7
    f      = 8
    g      = 9
    h      = 10
    i      = 11
    j      = 12
    k      = 13
    l      = 14
    m      = 15 # => 192.168.191.0/24 (last reserved zonal private subnet cidr). In reality, AWS doesn't have that many zones in a region.
  }
}

# All availability zones currently usable in this region.
data "aws_availability_zones" "available" {
  state = "available"
}

# Per-zone details (notably name_suffix, used to index local.az_number).
data "aws_availability_zone" "all" {
  for_each = toset(data.aws_availability_zones.available.names)

  name = each.key
}

# One Elastic IP per zone that gets a NAT gateway (var.zones only,
# not every available zone).
resource "aws_eip" "nat" {
  for_each = toset(var.zones)
  domain   = "vpc"
  tags     = var.tags
}

# Private (node) subnet in every available zone. The cidr is derived from the
# zone's suffix via local.az_number so it is stable across runs.
resource "aws_subnet" "private" {
  for_each          = data.aws_availability_zone.all
  vpc_id            = var.vpc_id
  cidr_block        = cidrsubnet(var.cidr_vpc_subnet_nodes, 4, local.az_number[each.value.name_suffix])
  availability_zone = each.key
  tags              = merge(var.tags, { Name = "${var.name}-subnet-nodes" }, { "kubernetes.io/role/internal-elb" = 1 }) # aws-load-balancer-controller needs role annotation
  lifecycle {
    ignore_changes = [
      cidr_block, # required. Legacy subnets used fixed cidr blocks for the single zone that don't match the new scheme.
    ]
  }
}

# Public (internet-facing) subnet in every available zone; same cidr scheme
# as the private subnets but carved out of cidr_vpc_subnet_internet.
resource "aws_subnet" "public" {
  for_each          = data.aws_availability_zone.all
  vpc_id            = var.vpc_id
  cidr_block        = cidrsubnet(var.cidr_vpc_subnet_internet, 4, local.az_number[each.value.name_suffix])
  availability_zone = each.key
  tags              = merge(var.tags, { Name = "${var.name}-subnet-internet" }, { "kubernetes.io/role/elb" = 1 }) # aws-load-balancer-controller needs role annotation
  lifecycle {
    ignore_changes = [
      cidr_block, # required. Legacy subnets used fixed cidr blocks for the single zone that don't match the new scheme.
    ]
  }
}

# Single internet gateway for the VPC.
resource "aws_internet_gateway" "gw" {
  vpc_id = var.vpc_id
  tags   = merge(var.tags, { Name = "${var.name}-internet-gateway" })
}

# One NAT gateway per used zone, placed in that zone's public subnet.
# NOTE(review): every NAT gateway gets the same Name tag — confirm whether a
# per-zone suffix is wanted.
resource "aws_nat_gateway" "gw" {
  for_each      = toset(var.zones)
  subnet_id     = aws_subnet.public[each.key].id
  allocation_id = aws_eip.nat[each.key].id
  tags          = merge(var.tags, { Name = "${var.name}-nat-gateway" })
}

# Per-zone route table sending outbound traffic from private subnets through
# that zone's NAT gateway.
resource "aws_route_table" "private_nat" {
  for_each = toset(var.zones)
  vpc_id   = var.vpc_id
  tags     = merge(var.tags, { Name = "${var.name}-private-nat" })

  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.gw[each.key].id
  }
}

# Per-zone route table sending public-subnet traffic to the internet gateway.
resource "aws_route_table" "public_igw" {
  for_each = toset(var.zones)
  vpc_id   = var.vpc_id
  tags     = merge(var.tags, { Name = "${var.name}-public-igw" })

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.gw.id
  }
}

# Attach the NAT route tables to the private subnets (used zones only).
resource "aws_route_table_association" "private_nat" {
  for_each       = toset(var.zones)
  subnet_id      = aws_subnet.private[each.key].id
  route_table_id = aws_route_table.private_nat[each.key].id
}

# Attach the IGW route tables to the public subnets (used zones only).
resource "aws_route_table_association" "route_to_internet" {
  for_each       = toset(var.zones)
  subnet_id      = aws_subnet.public[each.key].id
  route_table_id = aws_route_table.public_igw[each.key].id
}
|
@ -1,19 +0,0 @@
|
|||
output "private_subnet_id" {
  value = {
    for az in data.aws_availability_zone.all :
    az.name => aws_subnet.private[az.name].id
  }
  description = "Map from availability zone name to the ID of the private (node) subnet in that zone."
}

output "public_subnet_id" {
  value = {
    for az in data.aws_availability_zone.all :
    az.name => aws_subnet.public[az.name].id
  }
  description = "Map from availability zone name to the ID of the public (internet-facing) subnet in that zone."
}

# all_zones is a list of all availability zones in the region
# it also contains zones that are not currently used by node groups (but might be in the future)
output "all_zones" {
  value       = distinct(sort([for az in data.aws_availability_zone.all : az.name]))
  description = "Sorted list of all availability zones in the region, including zones not currently used by any node group."
}
|
@ -1,34 +0,0 @@
|
|||
variable "name" {
  type        = string
  description = "Name of your Constellation, which is used as a prefix for tags."
}

variable "vpc_id" {
  type        = string
  description = "ID of the VPC."
}

# NOTE(review): "zone" is not referenced by any resource visible in this
# module's main.tf — kept for the legacy interface per its description.
variable "zone" {
  type        = string
  description = "Main availability zone. Only used for legacy reasons."
}

variable "zones" {
  type        = list(string)
  description = "Availability zones."
}

variable "cidr_vpc_subnet_nodes" {
  type        = string
  description = "CIDR block for the subnet that will contain the nodes."
}

variable "cidr_vpc_subnet_internet" {
  type        = string
  description = "CIDR block for the subnet that contains resources reachable from the Internet."
}

variable "tags" {
  type        = map(string)
  description = "The tags to add to the resource."
}
|
@ -1,37 +0,0 @@
|
|||
# Endpoint reachable from outside the VPC (locals defined in main.tf).
output "out_of_cluster_endpoint" {
  value = local.out_of_cluster_endpoint
}

# Endpoint used by nodes inside the cluster.
output "in_cluster_endpoint" {
  value = local.in_cluster_endpoint
}

# Deduplicated, sorted SANs for the apiserver certificate: both endpoints
# plus the optional custom endpoint.
output "api_server_cert_sans" {
  value = sort(
    distinct(
      concat(
        [
          local.in_cluster_endpoint,
          local.out_of_cluster_endpoint,
        ],
        var.custom_endpoint == "" ? [] : [var.custom_endpoint],
      )
    )
  )
}

output "uid" {
  value = local.uid
}

# Cluster bootstrap secret; marked sensitive so it is not printed in plans.
output "initSecret" {
  value     = random_password.initSecret.result
  sensitive = true
}

output "name" {
  value = local.name
}

output "ip_cidr_nodes" {
  value = local.cidr_vpc_subnet_nodes
}
|
@ -1,81 +0,0 @@
|
|||
variable "name" {
  type        = string
  description = "Name of your Constellation"
  validation {
    condition     = length(var.name) <= 10
    error_message = "The length of the name of the Constellation must be <= 10 characters"
  }
  validation {
    condition     = var.name == lower(var.name)
    error_message = "The name of the Constellation must be in lowercase"
  }
}

variable "node_groups" {
  type = map(object({
    role          = string
    initial_count = optional(number)
    instance_type = string
    disk_size     = number
    disk_type     = string
    zone          = string
  }))
  description = "A map of node group names to node group configurations."
  validation {
    # Fix: the previous `can([for ...])` always succeeded, because building a
    # list of booleans never errors — invalid roles were silently accepted.
    # alltrue() actually enforces the constraint (matching the scale_set
    # module's role validation).
    condition     = alltrue([for group in var.node_groups : contains(["control-plane", "worker"], group.role)])
    error_message = "The role has to be 'control-plane' or 'worker'."
  }
}

variable "iam_instance_profile_worker_nodes" {
  type        = string
  description = "Name of the IAM instance profile for worker nodes"
}

variable "iam_instance_profile_control_plane" {
  type        = string
  description = "Name of the IAM instance profile for control plane nodes"
}

variable "ami" {
  type        = string
  description = "AMI ID"
  validation {
    condition     = length(var.ami) > 4 && substr(var.ami, 0, 4) == "ami-"
    error_message = "The image_id value must be a valid AMI id, starting with \"ami-\"."
  }
}

variable "region" {
  type        = string
  description = "The AWS region to create the cluster in"
}

variable "zone" {
  type        = string
  description = "The AWS availability zone name to create the cluster in"
}

variable "debug" {
  type        = bool
  default     = false
  description = "Enable debug mode. This opens up a debugd port that can be used to deploy a custom bootstrapper."
}

variable "enable_snp" {
  type        = bool
  default     = true
  description = "Enable AMD SEV SNP. Setting this to true sets the cpu-option AmdSevSnp to enable."
}

variable "custom_endpoint" {
  type        = string
  default     = ""
  description = "Custom endpoint to use for the Kubernetes apiserver. If not set, the default endpoint will be used."
}

variable "internal_load_balancer" {
  type        = bool
  default     = false
  description = "Use an internal load balancer."
}
@ -1,85 +0,0 @@
|
|||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/azurerm" {
|
||||
version = "3.74.0"
|
||||
constraints = "3.74.0"
|
||||
hashes = [
|
||||
"h1:1kSiowd/tBNswp3iv7ePlzkP5llWihjHcY3pdXdJqVU=",
|
||||
"h1:4b15khHtc5OkIVEFg0W5QRwf/ov1WVQkXVdSiAcTCS8=",
|
||||
"h1:ETVZfmulZQ435+lgFCkZRpfVOLyAxfDOwbPXFg3aLLQ=",
|
||||
"h1:H3diAufZ5VDQKsQNYykVRaFTOUJ4gjFiT2VLYi574+w=",
|
||||
"h1:LEdK8BxNSNiBQbtcJhQZKMMHDjmPpUsvDpr3Mzs93Tg=",
|
||||
"h1:OtJKZcMwrRNR84ylT1GgMwGR8KTxVOCkNifbjABlGj0=",
|
||||
"h1:Rq+CNb+4u47dw20tlAeI2yxSOuDtLm+S/GZO2pneLyA=",
|
||||
"h1:VfBB00BE0wvFiod7BlL+Cn6r2599MEi94hnAQ277ux8=",
|
||||
"h1:YJ15rwD0G7lYc9OVh5GO4VTqcd2jhqegfgyqTJH1M/I=",
|
||||
"h1:YvxxiqiwXjZdU53u3b9q49ezsIAb59KmdLLFkwkwFAs=",
|
||||
"h1:xDRmcV40KrWttPYg/w0/IN/frS9K1twuyvqRNVZko44=",
|
||||
"zh:0424c70152f949da1ec52ba96d20e5fd32fd22d9bd9203ce045d5f6aab3d20fc",
|
||||
"zh:16dbf581d10f8e7937185bcdcceb4f91d08c919e452fb8da7580071288c8c397",
|
||||
"zh:3019103bc2c3b4e185f5c65696c349697644c968f5c085af5505fed6d01c4241",
|
||||
"zh:49bb56ebaed6653fdb913c2b2bb74fc8b5399e7258d1e89084f72c44ea1130dd",
|
||||
"zh:85547666517f899d88620bd23a000a8f43c7dc93587c350eb1ea17bcb3e645c7",
|
||||
"zh:8bed8b646ff1822d8764de68b56b71e5dd971a4b77eba80d47f400a530800bea",
|
||||
"zh:8bfa6c70c004ba05ebce47f74f49ce872c28a68a18bb71b281a9681bcbbdbfa1",
|
||||
"zh:a2ae9e38fda0695fb8aa810e4f1ce4b104bfda651a87923b307bb1728680d8b6",
|
||||
"zh:beac1efe32f99072c892095f5ff46e40d6852b66679a03bc3acbe1b90fb1f653",
|
||||
"zh:d8a6ca20e49ebe7ea5688d91233d571e2c2ccc3e41000c39a7d7031df209ea8e",
|
||||
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
||||
"zh:f937b5fdf49b072c0347408d0a1c5a5d822dae1a23252915930e5a82d1d8ce8b",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/random" {
|
||||
version = "3.5.1"
|
||||
constraints = "3.5.1"
|
||||
hashes = [
|
||||
"h1:0ULxM8/DscMzfiDWg1yclBf/39U44wQmlx745BfYZ80=",
|
||||
"h1:3hjTP5tQBspPcFAJlfafnWrNrKnr7J4Cp0qB9jbqf30=",
|
||||
"h1:6FVyQ/aG6tawPam6B+oFjgdidKd83uG9n7dOSQ66HBA=",
|
||||
"h1:6ePAACdONiMGe1j5pwUc0gpDFt82y/ka0zRimMg/geM=",
|
||||
"h1:BD3Y4CcrGHb9sx+Bl5V8M2PSyw23mykzXSwj+/6FhHA=",
|
||||
"h1:HGeb7Tajn7HZwX0MhrdyL57LoCSz5GMcI2wbHs12D4U=",
|
||||
"h1:IL9mSatmwov+e0+++YX2V6uel+dV6bn+fC/cnGDK3Ck=",
|
||||
"h1:JiENkIxSWc32/2Dtd1n4CWY3ow/PHvAeGhdgcOLpWZM=",
|
||||
"h1:MROYZuKGTuaTNf2FgbwCgSVpteQW25ubnb+Xfok2jvk=",
|
||||
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
|
||||
"h1:sZ7MTSD4FLekNN2wSNFGpM+5slfvpm5A/NLVZiB7CO0=",
|
||||
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
|
||||
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
|
||||
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
|
||||
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
|
||||
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
|
||||
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
|
||||
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
|
||||
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
|
||||
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
|
||||
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/tls" {
|
||||
version = "4.0.4"
|
||||
hashes = [
|
||||
"h1:GZcFizg5ZT2VrpwvxGBHQ/hO9r6g0vYdQqx3bFD3anY=",
|
||||
"h1:Wd3RqmQW60k2QWPN4sK5CtjGuO1d+CRNXgC+D4rKtXc=",
|
||||
"h1:bNsvpX5EGuVxgGRXBQVLXlmq40PdoLp8Rfuh1ZmV7yY=",
|
||||
"h1:pe9vq86dZZKCm+8k1RhzARwENslF3SXb9ErHbQfgjXU=",
|
||||
"h1:rKKMyIEBZwR+8j6Tx3PwqBrStuH+J+pxcbCR5XN8WAw=",
|
||||
"zh:23671ed83e1fcf79745534841e10291bbf34046b27d6e68a5d0aab77206f4a55",
|
||||
"zh:45292421211ffd9e8e3eb3655677700e3c5047f71d8f7650d2ce30242335f848",
|
||||
"zh:59fedb519f4433c0fdb1d58b27c210b27415fddd0cd73c5312530b4309c088be",
|
||||
"zh:5a8eec2409a9ff7cd0758a9d818c74bcba92a240e6c5e54b99df68fff312bbd5",
|
||||
"zh:5e6a4b39f3171f53292ab88058a59e64825f2b842760a4869e64dc1dc093d1fe",
|
||||
"zh:810547d0bf9311d21c81cc306126d3547e7bd3f194fc295836acf164b9f8424e",
|
||||
"zh:824a5f3617624243bed0259d7dd37d76017097dc3193dac669be342b90b2ab48",
|
||||
"zh:9361ccc7048be5dcbc2fafe2d8216939765b3160bd52734f7a9fd917a39ecbd8",
|
||||
"zh:aa02ea625aaf672e649296bce7580f62d724268189fe9ad7c1b36bb0fa12fa60",
|
||||
"zh:c71b4cd40d6ec7815dfeefd57d88bc592c0c42f5e5858dcc88245d371b4b8b1e",
|
||||
"zh:dabcd52f36b43d250a3d71ad7abfa07b5622c69068d989e60b79b2bb4f220316",
|
||||
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
||||
]
|
||||
}
|
|
@ -1,290 +0,0 @@
|
|||
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "3.74.0"
    }
    random = {
      source  = "hashicorp/random"
      version = "3.5.1"
    }
  }
}

provider "azurerm" {
  features {
    resource_group {
      # Allow destroying the resource group even if it still contains resources.
      prevent_deletion_if_contains_resources = false
    }
  }
}

locals {
  uid            = random_id.uid.hex
  name           = "${var.name}-${local.uid}"
  initSecretHash = random_password.initSecret.bcrypt_hash
  tags = {
    constellation-uid = local.uid,
  }
  ports_node_range      = "30000-32767"
  cidr_vpc_subnet_nodes = "192.168.178.0/24"
  # Constellation service ports. Each entry becomes a load balancer health
  # probe/rule (load_balancer_backend module) and an NSG inbound rule below.
  # The debugd port is only exposed in debug mode.
  ports = flatten([
    { name = "kubernetes", port = "6443", health_check_protocol = "Https", path = "/readyz", priority = 100 },
    { name = "bootstrapper", port = "9000", health_check_protocol = "Tcp", path = null, priority = 101 },
    { name = "verify", port = "30081", health_check_protocol = "Tcp", path = null, priority = 102 },
    { name = "konnectivity", port = "8132", health_check_protocol = "Tcp", path = null, priority = 103 },
    { name = "recovery", port = "9999", health_check_protocol = "Tcp", path = null, priority = 104 },
    { name = "join", port = "30090", health_check_protocol = "Tcp", path = null, priority = 105 },
    var.debug ? [{ name = "debugd", port = "4000", health_check_protocol = "Tcp", path = null, priority = 106 }] : [],
  ])
  // wildcard_lb_dns_name is the DNS name of the load balancer with a wildcard for the name.
  // example: given "name-1234567890.location.cloudapp.azure.com" it will return "*.location.cloudapp.azure.com"
  wildcard_lb_dns_name = var.internal_load_balancer ? "" : replace(data.azurerm_public_ip.loadbalancer_ip[0].fqdn, "/^[^.]*\\./", "*.")
  // deduce from format (subscriptions)/$ID/resourceGroups/$RG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/$NAME"
  // move from the right as to ignore the optional prefixes
  uai_resource_group = element(split("/", var.user_assigned_identity), length(split("/", var.user_assigned_identity)) - 5)
  // deduce as above
  uai_name = element(split("/", var.user_assigned_identity), length(split("/", var.user_assigned_identity)) - 1)

  # Internal endpoint is the LB frontend IP (private or public); the external
  # endpoint goes through the jump host when debugging an internal LB.
  in_cluster_endpoint     = var.internal_load_balancer ? azurerm_lb.loadbalancer.frontend_ip_configuration[0].private_ip_address : azurerm_public_ip.loadbalancer_ip[0].ip_address
  out_of_cluster_endpoint = var.debug && var.internal_load_balancer ? module.jump_host[0].ip : local.in_cluster_endpoint
}
||||
|
||||
# Random suffix making all cluster resource names unique.
resource "random_id" "uid" {
  byte_length = 4
}

# Cluster bootstrap secret; its bcrypt hash is attached to the scale set tags.
resource "random_password" "initSecret" {
  length           = 32
  special          = true
  override_special = "_%@"
}

# Optional Microsoft Azure Attestation provider for remote attestation.
resource "azurerm_attestation_provider" "attestation_provider" {
  count = var.create_maa ? 1 : 0
  # name must be between 3 and 24 characters in length and use numbers and lower-case letters only.
  name                = format("constell%s", local.uid)
  resource_group_name = var.resource_group
  location            = var.location

  lifecycle {
    # Attestation policies will be set automatically upon creation, even if not specified in the resource,
    # while they aren't being incorporated into the Terraform state correctly.
    # To prevent them from being set to null when applying an upgrade, ignore the changes until the issue
    # is resolved by Azure.
    # Related issue: https://github.com/hashicorp/terraform-provider-azurerm/issues/21998
    ignore_changes = [open_enclave_policy_base64, sgx_enclave_policy_base64, tpm_policy_base64, sev_snp_policy_base64]
  }
}

resource "azurerm_application_insights" "insights" {
  name                = local.name
  location            = var.location
  resource_group_name = var.resource_group
  application_type    = "other"
  tags                = local.tags
}

# Public frontend IP for the load balancer (skipped for internal LBs).
resource "azurerm_public_ip" "loadbalancer_ip" {
  count               = var.internal_load_balancer ? 0 : 1
  name                = "${local.name}-lb"
  domain_name_label   = local.name
  resource_group_name = var.resource_group
  location            = var.location
  allocation_method   = "Static"
  sku                 = "Standard"
  tags                = local.tags

  lifecycle {
    # Allow legacy clusters to keep their pre-existing IP names.
    ignore_changes = [name]
  }
}

// Reads data from the resource of the same name.
// Used to wait to the actual resource to become ready, before using data from that resource.
// Property "fqdn" only becomes available on azurerm_public_ip resources once domain_name_label is set.
// Since we are setting domain_name_label starting with 2.10 we need to migrate
// resources for clusters created before 2.9. In those cases we need to wait until loadbalancer_ip has
// been updated before reading from it.
data "azurerm_public_ip" "loadbalancer_ip" {
  count               = var.internal_load_balancer ? 0 : 1
  name                = "${local.name}-lb"
  resource_group_name = var.resource_group
  depends_on          = [azurerm_public_ip.loadbalancer_ip]
}
||||
|
||||
# Static public IP used by the NAT gateway for node egress.
resource "azurerm_public_ip" "nat_gateway_ip" {
  name                = "${local.name}-nat"
  resource_group_name = var.resource_group
  location            = var.location
  allocation_method   = "Static"
  sku                 = "Standard"
  tags                = local.tags
}

resource "azurerm_nat_gateway" "gateway" {
  name                    = local.name
  location                = var.location
  resource_group_name     = var.resource_group
  sku_name                = "Standard"
  idle_timeout_in_minutes = 10
}

# NOTE(review): the "example" resource labels below are leftovers from
# provider docs; renaming them would change Terraform state addresses and
# force recreation, so they are kept as-is.
resource "azurerm_subnet_nat_gateway_association" "example" {
  nat_gateway_id = azurerm_nat_gateway.gateway.id
  subnet_id      = azurerm_subnet.node_subnet.id
}

resource "azurerm_nat_gateway_public_ip_association" "example" {
  nat_gateway_id       = azurerm_nat_gateway.gateway.id
  public_ip_address_id = azurerm_public_ip.nat_gateway_ip.id
}

# Standard LB with either a public frontend (default) or a private frontend
# in the dedicated loadbalancer subnet (internal_load_balancer = true).
# Exactly one of the two dynamic frontend blocks is rendered.
resource "azurerm_lb" "loadbalancer" {
  name                = local.name
  location            = var.location
  resource_group_name = var.resource_group
  sku                 = "Standard"
  tags                = local.tags

  dynamic "frontend_ip_configuration" {
    for_each = var.internal_load_balancer ? [] : [1]
    content {
      name                 = "PublicIPAddress"
      public_ip_address_id = azurerm_public_ip.loadbalancer_ip[0].id
    }
  }

  dynamic "frontend_ip_configuration" {
    for_each = var.internal_load_balancer ? [1] : []
    content {
      name                          = "PrivateIPAddress"
      private_ip_address_allocation = "Dynamic"
      subnet_id                     = azurerm_subnet.loadbalancer_subnet[0].id
    }
  }
}
||||
|
||||
# Backend pool + health probes + LB rules for all control-plane service ports.
module "loadbalancer_backend_control_plane" {
  source = "./modules/load_balancer_backend"

  name                           = "${local.name}-control-plane"
  loadbalancer_id                = azurerm_lb.loadbalancer.id
  frontend_ip_configuration_name = azurerm_lb.loadbalancer.frontend_ip_configuration[0].name
  ports                          = local.ports
}

# Worker backend pool; no probes/rules (empty ports), pool membership only.
module "loadbalancer_backend_worker" {
  source = "./modules/load_balancer_backend"

  name                           = "${local.name}-worker"
  loadbalancer_id                = azurerm_lb.loadbalancer.id
  frontend_ip_configuration_name = azurerm_lb.loadbalancer.frontend_ip_configuration[0].name
  ports                          = []
}

# Pool containing every node, regardless of role.
resource "azurerm_lb_backend_address_pool" "all" {
  loadbalancer_id = azurerm_lb.loadbalancer.id
  name            = "${var.name}-all"
}

resource "azurerm_virtual_network" "network" {
  name                = local.name
  resource_group_name = var.resource_group
  location            = var.location
  address_space       = ["10.0.0.0/8"]
  tags                = local.tags
}

# Dedicated subnet for the internal LB frontend (internal LB mode only).
resource "azurerm_subnet" "loadbalancer_subnet" {
  count                = var.internal_load_balancer ? 1 : 0
  name                 = "${local.name}-lb"
  resource_group_name  = var.resource_group
  virtual_network_name = azurerm_virtual_network.network.name
  address_prefixes     = ["10.10.0.0/16"]
}

# Subnet holding all cluster nodes.
resource "azurerm_subnet" "node_subnet" {
  name                 = "${local.name}-node"
  resource_group_name  = var.resource_group
  virtual_network_name = azurerm_virtual_network.network.name
  address_prefixes     = ["10.9.0.0/16"]
}
||||
|
||||
# NSG opening one inbound TCP rule per Constellation service port plus the
# Kubernetes NodePort range.
resource "azurerm_network_security_group" "security_group" {
  name                = local.name
  location            = var.location
  resource_group_name = var.resource_group
  tags                = local.tags

  dynamic "security_rule" {
    for_each = concat(
      local.ports,
      [{ name = "nodeports", port = local.ports_node_range, priority = 200 }]
    )
    content {
      name                       = security_rule.value.name
      priority                   = security_rule.value.priority
      direction                  = "Inbound"
      access                     = "Allow"
      protocol                   = "Tcp"
      source_port_range          = "*"
      # "port" may be a single port or a range (nodeports entry).
      destination_port_range     = security_rule.value.port
      source_address_prefix      = "*"
      destination_address_prefix = "*"
    }
  }
}
||||
|
||||
# One VM scale set per configured node group.
module "scale_set_group" {
  source          = "./modules/scale_set"
  for_each        = var.node_groups
  base_name       = local.name
  node_group_name = each.key
  role            = each.value.role
  zones           = each.value.zones
  tags = merge(
    local.tags,
    { constellation-init-secret-hash = local.initSecretHash },
    { constellation-maa-url = var.create_maa ? azurerm_attestation_provider.attestation_provider[0].attestation_uri : "" },
  )

  initial_count             = each.value.initial_count
  state_disk_size           = each.value.disk_size
  state_disk_type           = each.value.disk_type
  location                  = var.location
  instance_type             = each.value.instance_type
  confidential_vm           = var.confidential_vm
  secure_boot               = var.secure_boot
  resource_group            = var.resource_group
  user_assigned_identity    = var.user_assigned_identity
  image_id                  = var.image_id
  network_security_group_id = azurerm_network_security_group.security_group.id
  subnet_id                 = azurerm_subnet.node_subnet.id
  # Control-plane nodes additionally join the control-plane backend pool;
  # workers join the worker pool. Both roles join the "all" pool.
  backend_address_pool_ids = each.value.role == "control-plane" ? [
    azurerm_lb_backend_address_pool.all.id,
    module.loadbalancer_backend_control_plane.backendpool_id
  ] : [
    azurerm_lb_backend_address_pool.all.id,
    module.loadbalancer_backend_worker.backendpool_id
  ]
}

# Debug-only jump host forwarding the service ports to an internal LB.
module "jump_host" {
  count          = var.internal_load_balancer && var.debug ? 1 : 0
  source         = "./modules/jump_host"
  base_name      = local.name
  resource_group = var.resource_group
  location       = var.location
  subnet_id      = azurerm_subnet.loadbalancer_subnet[0].id
  ports          = [for port in local.ports : port.port]
  lb_internal_ip = azurerm_lb.loadbalancer.frontend_ip_configuration[0].private_ip_address
}

data "azurerm_subscription" "current" {
}

# Resolve the user-assigned identity from the name/RG parsed in locals.
data "azurerm_user_assigned_identity" "uaid" {
  name                = local.uai_name
  resource_group_name = local.uai_resource_group
}
|
|
@ -1,85 +0,0 @@
|
|||
# Ubuntu jump host that DNATs the Constellation service ports to the internal
# load balancer, making an internal cluster reachable for debugging.
resource "azurerm_linux_virtual_machine" "jump_host" {
  name                = "${var.base_name}-jump-host"
  resource_group_name = var.resource_group
  location            = var.location
  size                = "Standard_D2as_v5"

  network_interface_ids = [
    azurerm_network_interface.jump_host.id,
  ]

  admin_username = "adminuser"

  admin_ssh_key {
    username   = "adminuser"
    public_key = tls_private_key.ssh_key.public_key_openssh
  }

  os_disk {
    caching              = "ReadWrite"
    storage_account_type = "Standard_LRS"
  }

  source_image_reference {
    publisher = "Canonical"
    offer     = "0001-com-ubuntu-server-jammy"
    sku       = "22_04-lts-gen2"
    version   = "latest"
  }

  boot_diagnostics {

  }

  # Cloud-init script: enables IP forwarding, resolves the LB address if it is
  # not already an IPv4 literal, then installs DNAT/SNAT rules per port.
  # "$${...}" escapes produce literal shell "${...}"; plain "${...}" is
  # Terraform interpolation.
  user_data = base64encode(<<EOF
#!/bin/bash
set -x

# Uncomment to create user with password
# useradd -m user
# usermod -aG sudo user
# usermod --shell /bin/bash user
# sh -c "echo \"user:pass\" | chpasswd"

sysctl -w net.ipv4.ip_forward=1
sysctl -p

internal_ip=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')

lb_ip=${var.lb_internal_ip}
if [[ ! $${lb_ip} =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
  lb_ip=$(dig +short ${var.lb_internal_ip})
fi

%{for port in var.ports~}
iptables -t nat -A PREROUTING -p tcp --dport ${port} -j DNAT --to-destination $${lb_ip}:${port}
iptables -t nat -A POSTROUTING -p tcp -d $${lb_ip} --dport ${port} -j SNAT --to-source $${internal_ip}
%{endfor~}
EOF
  )
}

resource "azurerm_network_interface" "jump_host" {
  name                = "${var.base_name}-jump-host"
  resource_group_name = var.resource_group
  location            = var.location

  ip_configuration {
    name                          = "public"
    subnet_id                     = var.subnet_id
    private_ip_address_allocation = "Dynamic"
    public_ip_address_id          = azurerm_public_ip.jump_host.id
  }
}

# Public IP so the jump host is reachable from outside the VNet.
resource "azurerm_public_ip" "jump_host" {
  name                = "${var.base_name}-jump-host"
  resource_group_name = var.resource_group
  location            = var.location
  allocation_method   = "Dynamic"
}

# Throwaway SSH key for the admin user (private half stays in state only).
resource "tls_private_key" "ssh_key" {
  algorithm = "RSA"
  rsa_bits  = 4096
}
|
|
@ -1,3 +0,0 @@
|
|||
output "ip" {
  value       = azurerm_linux_virtual_machine.jump_host.public_ip_address
  description = "Public IP address of the jump host."
}
|
@ -1,29 +0,0 @@
|
|||
variable "base_name" {
  description = "Base name of the jump host"
  type        = string
}

variable "ports" {
  description = "Ports to forward to the load balancer"
  type        = list(number)
}

variable "resource_group" {
  description = "Resource group name to deploy the jump host into"
  type        = string
}

variable "location" {
  description = "Location to deploy the jump host into"
  type        = string
}

variable "subnet_id" {
  description = "Subnet ID to deploy the jump host into"
  type        = string
}

variable "lb_internal_ip" {
  description = "Internal IP of the load balancer"
  type        = string
}
|
@ -1,38 +0,0 @@
|
|||
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "3.74.0"
    }
  }
}

# Backend pool to which the scale set NICs are attached.
resource "azurerm_lb_backend_address_pool" "backend_pool" {
  loadbalancer_id = var.loadbalancer_id
  name            = var.name
}

# One health probe per configured port; request_path is null for Tcp probes.
resource "azurerm_lb_probe" "health_probes" {
  for_each = { for port in var.ports : port.name => port }

  loadbalancer_id     = var.loadbalancer_id
  name                = each.value.name
  port                = each.value.port
  protocol            = each.value.health_check_protocol
  request_path        = each.value.path
  interval_in_seconds = 5
}

# One LB rule per probe, forwarding frontend port to the same backend port.
# Outbound SNAT is disabled; egress goes through the NAT gateway instead.
resource "azurerm_lb_rule" "rules" {
  for_each = azurerm_lb_probe.health_probes

  loadbalancer_id                = var.loadbalancer_id
  name                           = each.value.name
  protocol                       = "Tcp"
  frontend_port                  = each.value.port
  backend_port                   = each.value.port
  frontend_ip_configuration_name = var.frontend_ip_configuration_name
  backend_address_pool_ids       = [azurerm_lb_backend_address_pool.backend_pool.id]
  probe_id                       = each.value.id
  disable_outbound_snat          = true
}
|
|
@ -1,4 +0,0 @@
|
|||
# Exposed so callers can attach scale sets to this pool.
output "backendpool_id" {
  value       = azurerm_lb_backend_address_pool.backend_pool.id
  description = "The ID of the created backend pool."
}
|
@ -1,25 +0,0 @@
|
|||
# NOTE(review): description says "cluster" but this value is used as the
# backend pool name in main.tf — confirm wording before changing.
variable "name" {
  type        = string
  default     = "constell"
  description = "Base name of the cluster."
}

variable "frontend_ip_configuration_name" {
  type        = string
  description = "The name of the frontend IP configuration to use for the load balancer."
}

variable "loadbalancer_id" {
  type        = string
  description = "The ID of the load balancer to add the backend to."
}

variable "ports" {
  type = list(object({
    name                  = string
    port                  = number
    health_check_protocol = string
    path                  = string
  }))
  description = "The ports to add to the backend. Protocol can be either 'Tcp' or 'Https'. Path is only used for 'Https' protocol and can otherwise be null."
}
|
@ -1,102 +0,0 @@
|
|||
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "3.74.0"
    }
    random = {
      source  = "hashicorp/random"
      version = "3.5.1"
    }
  }
}

locals {
  # Role/group tags let Constellation map instances back to their node group.
  tags = merge(
    var.tags,
    { constellation-role = var.role },
    { constellation-node-group = var.node_group_name },
  )
  group_uid = random_id.uid.hex
  name      = "${var.base_name}-${var.role}-${local.group_uid}"
}

resource "random_id" "uid" {
  byte_length = 4
}

# Admin password required by the scale set API; generated and kept in state.
resource "random_password" "password" {
  length      = 16
  min_lower   = 1
  min_upper   = 1
  min_numeric = 1
  min_special = 1
}

resource "azurerm_linux_virtual_machine_scale_set" "scale_set" {
  name                            = local.name
  resource_group_name             = var.resource_group
  location                        = var.location
  sku                             = var.instance_type
  instances                       = var.initial_count
  admin_username                  = "adminuser"
  admin_password                  = random_password.password.result
  overprovision                   = false
  provision_vm_agent              = false
  vtpm_enabled                    = true
  disable_password_authentication = false
  upgrade_mode                    = "Manual"
  secure_boot_enabled             = var.secure_boot
  source_image_id                 = var.image_id
  tags                            = local.tags
  zones                           = var.zones
  identity {
    type         = "UserAssigned"
    identity_ids = [var.user_assigned_identity]
  }

  boot_diagnostics {}

  # Exactly one of the two os_disk blocks is rendered: confidential VMs need
  # security_encryption_type, which is invalid on non-CVM sizes.
  dynamic "os_disk" {
    for_each = var.confidential_vm ? [1] : [] # if confidential_vm is true
    content {
      security_encryption_type = "VMGuestStateOnly"
      caching                  = "ReadWrite"
      storage_account_type     = "Premium_LRS"
    }
  }
  dynamic "os_disk" {
    for_each = var.confidential_vm ? [] : [1] # else
    content {
      caching              = "ReadWrite"
      storage_account_type = "Premium_LRS"
    }
  }

  # Persistent state disk attached at LUN 0.
  data_disk {
    storage_account_type = var.state_disk_type
    disk_size_gb         = var.state_disk_size
    caching              = "ReadWrite"
    lun                  = 0
  }

  network_interface {
    name                      = "node-network"
    primary                   = true
    network_security_group_id = var.network_security_group_id

    ip_configuration {
      name                                   = "node-network"
      primary                                = true
      subnet_id                              = var.subnet_id
      load_balancer_backend_address_pool_ids = var.backend_address_pool_ids
    }
  }

  lifecycle {
    ignore_changes = [
      name,            # required. Allow legacy scale sets to keep their old names
      instances,       # required. autoscaling modifies the instance count externally
      source_image_id, # required. update procedure modifies the image id externally
    ]
  }
}
|
@ -1,98 +0,0 @@
|
|||
variable "base_name" {
|
||||
type = string
|
||||
description = "Base name of the instance group."
|
||||
}
|
||||
|
||||
variable "node_group_name" {
|
||||
type = string
|
||||
description = "Constellation name for the node group (used for configuration and CSP-independent naming)."
|
||||
}
|
||||
|
||||
variable "role" {
|
||||
type = string
|
||||
description = "The role of the instance group."
|
||||
validation {
|
||||
condition = contains(["control-plane", "worker"], var.role)
|
||||
error_message = "The role has to be 'control-plane' or 'worker'."
|
||||
}
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
type = map(string)
|
||||
description = "Tags to include in the scale_set."
|
||||
}
|
||||
|
||||
variable "zones" {
|
||||
type = list(string)
|
||||
description = "List of availability zones."
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "initial_count" {
|
||||
type = number
|
||||
description = "The number of instances in this scale set."
|
||||
}
|
||||
|
||||
variable "instance_type" {
|
||||
type = string
|
||||
description = "The Azure instance type to deploy."
|
||||
}
|
||||
|
||||
variable "state_disk_size" {
|
||||
type = number
|
||||
default = 30
|
||||
description = "The size of the state disk in GB."
|
||||
}
|
||||
|
||||
variable "resource_group" {
|
||||
type = string
|
||||
description = "The name of the Azure resource group to create the Constellation cluster in."
|
||||
}
|
||||
|
||||
variable "location" {
|
||||
type = string
|
||||
description = "The Azure location to deploy the cluster in."
|
||||
}
|
||||
|
||||
variable "image_id" {
|
||||
type = string
|
||||
description = "The image to use for the cluster nodes."
|
||||
}
|
||||
|
||||
variable "user_assigned_identity" {
|
||||
type = string
|
||||
description = "The name of the user assigned identity to attache to the nodes of the cluster."
|
||||
}
|
||||
|
||||
variable "state_disk_type" {
|
||||
type = string
|
||||
default = "Premium_LRS"
|
||||
description = "The type of the state disk."
|
||||
}
|
||||
|
||||
variable "network_security_group_id" {
|
||||
type = string
|
||||
description = "The ID of the network security group to use for the scale set."
|
||||
}
|
||||
|
||||
variable "backend_address_pool_ids" {
|
||||
type = list(string)
|
||||
description = "The IDs of the backend address pools to use for the scale set."
|
||||
}
|
||||
|
||||
variable "subnet_id" {
|
||||
type = string
|
||||
description = "The ID of the subnet to use for the scale set."
|
||||
}
|
||||
|
||||
variable "confidential_vm" {
|
||||
type = bool
|
||||
default = true
|
||||
description = "Whether to deploy the cluster nodes as confidential VMs."
|
||||
}
|
||||
|
||||
variable "secure_boot" {
|
||||
type = bool
|
||||
default = false
|
||||
description = "Whether to deploy the cluster nodes with secure boot."
|
||||
}
|
|
@ -1,64 +0,0 @@
|
|||
output "out_of_cluster_endpoint" {
|
||||
value = local.out_of_cluster_endpoint
|
||||
}
|
||||
|
||||
output "in_cluster_endpoint" {
|
||||
value = local.in_cluster_endpoint
|
||||
}
|
||||
|
||||
output "api_server_cert_sans" {
|
||||
value = sort(
|
||||
distinct(
|
||||
concat(
|
||||
[
|
||||
local.in_cluster_endpoint,
|
||||
local.out_of_cluster_endpoint,
|
||||
],
|
||||
var.custom_endpoint == "" ? [] : [var.custom_endpoint],
|
||||
var.internal_load_balancer ? [] : [local.wildcard_lb_dns_name],
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
output "uid" {
|
||||
value = local.uid
|
||||
}
|
||||
|
||||
output "initSecret" {
|
||||
value = random_password.initSecret.result
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
output "attestationURL" {
|
||||
value = var.create_maa ? azurerm_attestation_provider.attestation_provider[0].attestation_uri : ""
|
||||
}
|
||||
|
||||
output "network_security_group_name" {
|
||||
value = azurerm_network_security_group.security_group.name
|
||||
}
|
||||
|
||||
output "loadbalancer_name" {
|
||||
value = azurerm_lb.loadbalancer.name
|
||||
}
|
||||
|
||||
|
||||
output "user_assigned_identity_client_id" {
|
||||
value = data.azurerm_user_assigned_identity.uaid.client_id
|
||||
}
|
||||
|
||||
output "resource_group" {
|
||||
value = var.resource_group
|
||||
}
|
||||
|
||||
output "subscription_id" {
|
||||
value = data.azurerm_subscription.current.subscription_id
|
||||
}
|
||||
|
||||
output "name" {
|
||||
value = local.name
|
||||
}
|
||||
|
||||
output "ip_cidr_nodes" {
|
||||
value = local.cidr_vpc_subnet_nodes
|
||||
}
|
|
@ -1,75 +0,0 @@
|
|||
variable "name" {
|
||||
type = string
|
||||
description = "Base name of the cluster."
|
||||
}
|
||||
|
||||
variable "node_groups" {
|
||||
type = map(object({
|
||||
role = string
|
||||
initial_count = optional(number)
|
||||
instance_type = string
|
||||
disk_size = number
|
||||
disk_type = string
|
||||
zones = optional(list(string))
|
||||
}))
|
||||
description = "A map of node group names to node group configurations."
|
||||
validation {
|
||||
condition = can([for group in var.node_groups : group.role == "control-plane" || group.role == "worker"])
|
||||
error_message = "The role has to be 'control-plane' or 'worker'."
|
||||
}
|
||||
}
|
||||
|
||||
variable "location" {
|
||||
type = string
|
||||
description = "The Azure location to deploy the cluster in."
|
||||
}
|
||||
|
||||
variable "image_id" {
|
||||
type = string
|
||||
description = "The image to use for the cluster nodes."
|
||||
}
|
||||
|
||||
variable "create_maa" {
|
||||
type = bool
|
||||
default = false
|
||||
description = "Whether to create a Microsoft Azure attestation provider."
|
||||
}
|
||||
|
||||
variable "debug" {
|
||||
type = bool
|
||||
default = false
|
||||
description = "Enable debug mode. This opens up a debugd port that can be used to deploy a custom bootstrapper."
|
||||
}
|
||||
|
||||
variable "confidential_vm" {
|
||||
type = bool
|
||||
default = true
|
||||
description = "Whether to deploy the cluster nodes as confidential VMs."
|
||||
}
|
||||
|
||||
variable "secure_boot" {
|
||||
type = bool
|
||||
default = false
|
||||
description = "Whether to deploy the cluster nodes with secure boot."
|
||||
}
|
||||
|
||||
variable "resource_group" {
|
||||
type = string
|
||||
description = "The name of the Azure resource group to create the Constellation cluster in."
|
||||
}
|
||||
variable "user_assigned_identity" {
|
||||
type = string
|
||||
description = "The name of the user assigned identity to attach to the nodes of the cluster. Should be of format: /subscriptions/$ID/resourceGroups/$RG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/$NAME"
|
||||
}
|
||||
|
||||
variable "custom_endpoint" {
|
||||
type = string
|
||||
default = ""
|
||||
description = "Custom endpoint to use for the Kubernetes apiserver. If not set, the default endpoint will be used."
|
||||
}
|
||||
|
||||
variable "internal_load_balancer" {
|
||||
type = bool
|
||||
default = false
|
||||
description = "Whether to use an internal load balancer for the Constellation."
|
||||
}
|
|
@ -1,92 +0,0 @@
|
|||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/google" {
|
||||
version = "4.83.0"
|
||||
constraints = "4.83.0"
|
||||
hashes = [
|
||||
"h1:04Dbo1eT5GovugyMTr78SetNLLXBVhzMeo67Noyu85o=",
|
||||
"h1:BOrMAGh1FwA290rqwOHJKdfYOhOyqcKiqunZ6K/qA6k=",
|
||||
"h1:QXESvZlpwchznnilwGfL5nwbYTNlJLl4RyV5TXjKZVY=",
|
||||
"h1:SmoEOGxSXmrWceJP4YVmpgdsnEk01OCZhwEUUViy0c0=",
|
||||
"h1:cWBKJt7QJ+MKerSq73qFICJkIsxHn1JepalZzR/eRk4=",
|
||||
"h1:dPId6xBo8+uET30DqkB400hKbMGR60NoxMkw1FFzvjA=",
|
||||
"h1:jvTOwFMz4iyq/4AjU6QjTOlL5R0etYt98tC7D/6eE1M=",
|
||||
"h1:lvCQfxljF0bY15qI78bxl9d1pW6o60WcyNp9ZQcx3DU=",
|
||||
"h1:nyeDdFmfYBFj3+Ng6IwfdSgo+D4fsCAbbTPmwPidQC8=",
|
||||
"h1:qx6znUIkV7pzjp1MgoLLUT+3hyv5zYbSdVho+JUUBKk=",
|
||||
"h1:x9rGt85+aTXPVhTtNJ4bdV5Wy3uJDJbVg+D0e0h/uiY=",
|
||||
"zh:0310360982c3d42449ef103fab0819770aa96c7813507778d71ed016942bed96",
|
||||
"zh:0d0f82ce5e54267641b1f1d494a3ad1ddd41a7553910dd33abd6a114feab6881",
|
||||
"zh:0eda79e53a1833e8692273f5d7224344200e49303e579aec7b53762f50f39210",
|
||||
"zh:3c0cf4abaf461238563132ab4564965bc6bd571eb3bbeedac89258a9a688b169",
|
||||
"zh:61d619e5163daeeb7909443cc0c67816939a1748aec2fe544ab3f380270aae92",
|
||||
"zh:66d9da66aec8575ee16b70b42a5ae082b2f43f4a84a844363a585806ac75cca0",
|
||||
"zh:875c5596f365130095ccc2150755b6fb8a6d9fe9af4af9f595029716be02cdef",
|
||||
"zh:a9af92cd6ea160618d6433c92297a4e3f3dc7a2e964516e1e7b51ce70f3ec178",
|
||||
"zh:b9566bd1910462b4d92c6976184c4408e42a3ef6a300962b49866aa0f6f29b11",
|
||||
"zh:bae735a81a04244893fd9e81d9b5d6c321d874cb37a7b5aab8a1c8c5044b362d",
|
||||
"zh:d97ae1676d793696498e0eda8324bc02edbd2fbbcd76eb103a949876ec1fe8c0",
|
||||
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/google-beta" {
|
||||
version = "4.83.0"
|
||||
constraints = "4.83.0"
|
||||
hashes = [
|
||||
"h1:3NSnmqqgbaGSbpiMzxTZJTdCoGH6jqUUktjrwPr+NgE=",
|
||||
"h1:900wOs1aRWQpEhv0058PEZCWk40ywG6NqHwuFlw+/p4=",
|
||||
"h1:Ewmi/ROl5YEvLf8BHgGrnlVkxjlia0fKyXLSFkcmGps=",
|
||||
"h1:IX3g+ndU9l8BQ/qU13yDk4vQuTxxUvyhYXBSTXmu1SQ=",
|
||||
"h1:J8MwreN/KrmeOWCVjbCm749EdeD/WnngXRIxPNbIBH4=",
|
||||
"h1:Y5OvzqSSPnELV+N5bPShZ2cjFqEynGoBRFFmf3F1M1U=",
|
||||
"h1:hxulmxS/QJyusZNl53N7bjwhVShQo7JxGuq5Tht08ZE=",
|
||||
"h1:qTXF/bRgloSMKhhzypno+9qP6Eno6qmNfEt9b5eMXRE=",
|
||||
"h1:tfTOCk0TCOeGfyeh8HX7MC2aYcsidgRykK9Wfqn1o8k=",
|
||||
"h1:uKmM3fJQyowwBV5qlAl4+qteXbsCEkwmGAwxaci+9cw=",
|
||||
"h1:uNQaNKcKbbU0uF3tHWEfGwqnG00oGX3bIi8aQe+ITFI=",
|
||||
"zh:006d2f02999598109ab0c6737495904e83bb78008defc7590d18d4a997dc7cbf",
|
||||
"zh:04455b025c1a5551187495125dd045d3c11334dcb68cc0c62d82574513f42eab",
|
||||
"zh:0b20f658e322c561bc6364240bc4169971e00efbbba8781b38c18dcf014e0788",
|
||||
"zh:2262b2ceb759427a0ec7fe994dd07fd1ee7c3cae2b1d87ef55aa7f005ffb6c52",
|
||||
"zh:3cf502334354b75334ff5b4285b2afcbef11b91c7cf1e18e16c2b1bb5a77e099",
|
||||
"zh:9469a3356b543894273beb2332cbf8f230cdbe810d5e3d18de3a461a726a20b2",
|
||||
"zh:968914382e310d0b41c012ec6435d796c40f5b95239f68ed8aad24be4dc705d6",
|
||||
"zh:aa70ee3f4dd1f433b965049f58c93c47e2f7f31cfd7848ec88afad71d14f7038",
|
||||
"zh:d2aa8fceb732886c2c80ff17237a6184fb3e7806e0280d4a1ab0e3d4a83b8fa9",
|
||||
"zh:e470be740b1854a157c1ff5d4f12a13548842135f0fddc1eda29571dc7c65327",
|
||||
"zh:e51e894c0bc9d9982de9ae42c1434c5397f77db41bb7e095996a715315018874",
|
||||
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/random" {
|
||||
version = "3.5.1"
|
||||
constraints = "3.5.1"
|
||||
hashes = [
|
||||
"h1:0ULxM8/DscMzfiDWg1yclBf/39U44wQmlx745BfYZ80=",
|
||||
"h1:3hjTP5tQBspPcFAJlfafnWrNrKnr7J4Cp0qB9jbqf30=",
|
||||
"h1:6FVyQ/aG6tawPam6B+oFjgdidKd83uG9n7dOSQ66HBA=",
|
||||
"h1:6ePAACdONiMGe1j5pwUc0gpDFt82y/ka0zRimMg/geM=",
|
||||
"h1:BD3Y4CcrGHb9sx+Bl5V8M2PSyw23mykzXSwj+/6FhHA=",
|
||||
"h1:HGeb7Tajn7HZwX0MhrdyL57LoCSz5GMcI2wbHs12D4U=",
|
||||
"h1:IL9mSatmwov+e0+++YX2V6uel+dV6bn+fC/cnGDK3Ck=",
|
||||
"h1:JiENkIxSWc32/2Dtd1n4CWY3ow/PHvAeGhdgcOLpWZM=",
|
||||
"h1:MROYZuKGTuaTNf2FgbwCgSVpteQW25ubnb+Xfok2jvk=",
|
||||
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
|
||||
"h1:sZ7MTSD4FLekNN2wSNFGpM+5slfvpm5A/NLVZiB7CO0=",
|
||||
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
|
||||
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
|
||||
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
|
||||
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
|
||||
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
|
||||
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
|
||||
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
|
||||
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
|
||||
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
|
||||
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
|
||||
]
|
||||
}
|
|
@ -1,275 +0,0 @@
|
|||
terraform {
|
||||
required_providers {
|
||||
google = {
|
||||
source = "hashicorp/google"
|
||||
version = "4.83.0"
|
||||
}
|
||||
|
||||
random = {
|
||||
source = "hashicorp/random"
|
||||
version = "3.5.1"
|
||||
}
|
||||
|
||||
google-beta = {
|
||||
source = "hashicorp/google-beta"
|
||||
version = "4.83.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "google" {
|
||||
project = var.project
|
||||
region = var.region
|
||||
zone = var.zone
|
||||
}
|
||||
|
||||
provider "google-beta" {
|
||||
project = var.project
|
||||
region = var.region
|
||||
zone = var.zone
|
||||
}
|
||||
|
||||
locals {
|
||||
uid = random_id.uid.hex
|
||||
name = "${var.name}-${local.uid}"
|
||||
initSecretHash = random_password.initSecret.bcrypt_hash
|
||||
labels = {
|
||||
constellation-uid = local.uid,
|
||||
}
|
||||
ports_node_range = "30000-32767"
|
||||
cidr_vpc_subnet_nodes = "192.168.178.0/24"
|
||||
cidr_vpc_subnet_pods = "10.10.0.0/16"
|
||||
cidr_vpc_subnet_proxy = "192.168.179.0/24"
|
||||
cidr_vpc_subnet_ilb = "192.168.180.0/24"
|
||||
kube_env = "AUTOSCALER_ENV_VARS: kube_reserved=cpu=1060m,memory=1019Mi,ephemeral-storage=41Gi;node_labels=;os=linux;os_distribution=cos;evictionHard="
|
||||
control_plane_named_ports = flatten([
|
||||
{ name = "kubernetes", port = "6443", health_check = "HTTPS" },
|
||||
{ name = "bootstrapper", port = "9000", health_check = "TCP" },
|
||||
{ name = "verify", port = "30081", health_check = "TCP" },
|
||||
{ name = "konnectivity", port = "8132", health_check = "TCP" },
|
||||
{ name = "recovery", port = "9999", health_check = "TCP" },
|
||||
{ name = "join", port = "30090", health_check = "TCP" },
|
||||
var.debug ? [{ name = "debugd", port = "4000", health_check = "TCP" }] : [],
|
||||
])
|
||||
node_groups_by_role = {
|
||||
for name, node_group in var.node_groups : node_group.role => name...
|
||||
}
|
||||
control_plane_instance_groups = [
|
||||
for control_plane in local.node_groups_by_role["control-plane"] : module.instance_group[control_plane].instance_group
|
||||
]
|
||||
in_cluster_endpoint = var.internal_load_balancer ? google_compute_address.loadbalancer_ip_internal[0].address : google_compute_global_address.loadbalancer_ip[0].address
|
||||
out_of_cluster_endpoint = var.debug && var.internal_load_balancer ? module.jump_host[0].ip : local.in_cluster_endpoint
|
||||
}
|
||||
|
||||
resource "random_id" "uid" {
|
||||
byte_length = 4
|
||||
}
|
||||
|
||||
resource "random_password" "initSecret" {
|
||||
length = 32
|
||||
special = true
|
||||
override_special = "_%@"
|
||||
}
|
||||
|
||||
resource "google_compute_network" "vpc_network" {
|
||||
name = local.name
|
||||
description = "Constellation VPC network"
|
||||
auto_create_subnetworks = false
|
||||
mtu = 8896
|
||||
}
|
||||
|
||||
resource "google_compute_subnetwork" "vpc_subnetwork" {
|
||||
name = local.name
|
||||
description = "Constellation VPC subnetwork"
|
||||
network = google_compute_network.vpc_network.id
|
||||
ip_cidr_range = local.cidr_vpc_subnet_nodes
|
||||
secondary_ip_range = [
|
||||
{
|
||||
range_name = local.name,
|
||||
ip_cidr_range = local.cidr_vpc_subnet_pods,
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
resource "google_compute_subnetwork" "proxy_subnet" {
|
||||
count = var.internal_load_balancer ? 1 : 0
|
||||
name = "${local.name}-proxy"
|
||||
ip_cidr_range = local.cidr_vpc_subnet_proxy
|
||||
region = var.region
|
||||
purpose = "REGIONAL_MANAGED_PROXY"
|
||||
role = "ACTIVE"
|
||||
network = google_compute_network.vpc_network.id
|
||||
}
|
||||
|
||||
resource "google_compute_subnetwork" "ilb_subnet" {
|
||||
count = var.internal_load_balancer ? 1 : 0
|
||||
name = "${local.name}-ilb"
|
||||
ip_cidr_range = local.cidr_vpc_subnet_ilb
|
||||
region = var.region
|
||||
network = google_compute_network.vpc_network.id
|
||||
depends_on = [google_compute_subnetwork.proxy_subnet]
|
||||
}
|
||||
|
||||
resource "google_compute_router" "vpc_router" {
|
||||
name = local.name
|
||||
description = "Constellation VPC router"
|
||||
network = google_compute_network.vpc_network.id
|
||||
}
|
||||
|
||||
resource "google_compute_router_nat" "vpc_router_nat" {
|
||||
name = local.name
|
||||
router = google_compute_router.vpc_router.name
|
||||
nat_ip_allocate_option = "AUTO_ONLY"
|
||||
source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES"
|
||||
}
|
||||
|
||||
resource "google_compute_firewall" "firewall_external" {
|
||||
name = local.name
|
||||
description = "Constellation VPC firewall"
|
||||
network = google_compute_network.vpc_network.id
|
||||
source_ranges = ["0.0.0.0/0"]
|
||||
direction = "INGRESS"
|
||||
|
||||
allow {
|
||||
protocol = "tcp"
|
||||
ports = flatten([
|
||||
[for port in local.control_plane_named_ports : port.port],
|
||||
[local.ports_node_range],
|
||||
var.internal_load_balancer ? [22] : [],
|
||||
])
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resource "google_compute_firewall" "firewall_internal_nodes" {
|
||||
name = "${local.name}-nodes"
|
||||
description = "Constellation VPC firewall"
|
||||
network = google_compute_network.vpc_network.id
|
||||
source_ranges = [local.cidr_vpc_subnet_nodes]
|
||||
direction = "INGRESS"
|
||||
|
||||
allow { protocol = "tcp" }
|
||||
allow { protocol = "udp" }
|
||||
allow { protocol = "icmp" }
|
||||
}
|
||||
|
||||
resource "google_compute_firewall" "firewall_internal_pods" {
|
||||
name = "${local.name}-pods"
|
||||
description = "Constellation VPC firewall"
|
||||
network = google_compute_network.vpc_network.id
|
||||
source_ranges = [local.cidr_vpc_subnet_pods]
|
||||
direction = "INGRESS"
|
||||
|
||||
allow { protocol = "tcp" }
|
||||
allow { protocol = "udp" }
|
||||
allow { protocol = "icmp" }
|
||||
}
|
||||
|
||||
|
||||
module "instance_group" {
|
||||
source = "./modules/instance_group"
|
||||
for_each = var.node_groups
|
||||
base_name = local.name
|
||||
node_group_name = each.key
|
||||
role = each.value.role
|
||||
zone = each.value.zone
|
||||
uid = local.uid
|
||||
instance_type = each.value.instance_type
|
||||
initial_count = each.value.initial_count
|
||||
image_id = var.image_id
|
||||
disk_size = each.value.disk_size
|
||||
disk_type = each.value.disk_type
|
||||
network = google_compute_network.vpc_network.id
|
||||
subnetwork = google_compute_subnetwork.vpc_subnetwork.id
|
||||
alias_ip_range_name = google_compute_subnetwork.vpc_subnetwork.secondary_ip_range[0].range_name
|
||||
kube_env = local.kube_env
|
||||
debug = var.debug
|
||||
named_ports = each.value.role == "control-plane" ? local.control_plane_named_ports : []
|
||||
labels = local.labels
|
||||
init_secret_hash = local.initSecretHash
|
||||
custom_endpoint = var.custom_endpoint
|
||||
}
|
||||
|
||||
resource "google_compute_address" "loadbalancer_ip_internal" {
|
||||
count = var.internal_load_balancer ? 1 : 0
|
||||
name = local.name
|
||||
region = var.region
|
||||
subnetwork = google_compute_subnetwork.ilb_subnet[0].id
|
||||
purpose = "SHARED_LOADBALANCER_VIP"
|
||||
address_type = "INTERNAL"
|
||||
}
|
||||
|
||||
resource "google_compute_global_address" "loadbalancer_ip" {
|
||||
count = var.internal_load_balancer ? 0 : 1
|
||||
name = local.name
|
||||
}
|
||||
|
||||
module "loadbalancer_public" {
|
||||
// for every port in control_plane_named_ports if internal lb is disabled
|
||||
for_each = var.internal_load_balancer ? {} : { for port in local.control_plane_named_ports : port.name => port }
|
||||
source = "./modules/loadbalancer"
|
||||
name = local.name
|
||||
backend_port_name = each.value.name
|
||||
port = each.value.port
|
||||
health_check = each.value.health_check
|
||||
backend_instance_groups = local.control_plane_instance_groups
|
||||
ip_address = google_compute_global_address.loadbalancer_ip[0].self_link
|
||||
frontend_labels = merge(local.labels, { constellation-use = each.value.name })
|
||||
}
|
||||
|
||||
module "loadbalancer_internal" {
|
||||
for_each = var.internal_load_balancer ? { for port in local.control_plane_named_ports : port.name => port } : {}
|
||||
source = "./modules/internal_load_balancer"
|
||||
name = local.name
|
||||
backend_port_name = each.value.name
|
||||
port = each.value.port
|
||||
health_check = each.value.health_check
|
||||
backend_instance_group = local.control_plane_instance_groups[0]
|
||||
ip_address = google_compute_address.loadbalancer_ip_internal[0].self_link
|
||||
frontend_labels = merge(local.labels, { constellation-use = each.value.name })
|
||||
|
||||
region = var.region
|
||||
network = google_compute_network.vpc_network.id
|
||||
backend_subnet = google_compute_subnetwork.ilb_subnet[0].id
|
||||
}
|
||||
|
||||
module "jump_host" {
|
||||
count = var.internal_load_balancer && var.debug ? 1 : 0
|
||||
source = "./modules/jump_host"
|
||||
base_name = local.name
|
||||
zone = var.zone
|
||||
subnetwork = google_compute_subnetwork.vpc_subnetwork.id
|
||||
labels = local.labels
|
||||
lb_internal_ip = google_compute_address.loadbalancer_ip_internal[0].address
|
||||
ports = [for port in local.control_plane_named_ports : port.port]
|
||||
}
|
||||
moved {
|
||||
from = module.loadbalancer_boot
|
||||
to = module.loadbalancer_public["bootstrapper"]
|
||||
}
|
||||
|
||||
moved {
|
||||
from = module.loadbalancer_kube
|
||||
to = module.loadbalancer_public["kubernetes"]
|
||||
}
|
||||
|
||||
moved {
|
||||
from = module.loadbalancer_verify
|
||||
to = module.loadbalancer_public["verify"]
|
||||
}
|
||||
|
||||
moved {
|
||||
from = module.loadbalancer_konnectivity
|
||||
to = module.loadbalancer_public["konnectivity"]
|
||||
}
|
||||
|
||||
moved {
|
||||
from = module.loadbalancer_recovery
|
||||
to = module.loadbalancer_public["recovery"]
|
||||
}
|
||||
|
||||
moved {
|
||||
from = module.loadbalancer_debugd[0]
|
||||
to = module.loadbalancer_public["debugd"]
|
||||
}
|
|
@ -1,153 +0,0 @@
|
|||
terraform {
|
||||
required_providers {
|
||||
google = {
|
||||
source = "hashicorp/google"
|
||||
version = "4.83.0"
|
||||
}
|
||||
|
||||
random = {
|
||||
source = "hashicorp/random"
|
||||
version = "3.5.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
locals {
|
||||
group_uid = random_id.uid.hex
|
||||
name = "${var.base_name}-${var.role}-${local.group_uid}"
|
||||
state_disk_name = "state-disk"
|
||||
}
|
||||
|
||||
resource "random_id" "uid" {
|
||||
byte_length = 4
|
||||
}
|
||||
|
||||
resource "google_compute_instance_template" "template" {
|
||||
name = local.name
|
||||
machine_type = var.instance_type
|
||||
tags = ["constellation-${var.uid}"] // Note that this is also applied as a label
|
||||
labels = merge(var.labels, {
|
||||
constellation-role = var.role,
|
||||
constellation-node-group = var.node_group_name,
|
||||
})
|
||||
|
||||
confidential_instance_config {
|
||||
enable_confidential_compute = true
|
||||
}
|
||||
|
||||
disk {
|
||||
disk_size_gb = 10
|
||||
source_image = var.image_id
|
||||
auto_delete = true
|
||||
boot = true
|
||||
mode = "READ_WRITE"
|
||||
}
|
||||
|
||||
disk {
|
||||
disk_size_gb = var.disk_size
|
||||
disk_type = var.disk_type
|
||||
auto_delete = true
|
||||
device_name = local.state_disk_name // This name is used by disk mapper to find the disk
|
||||
boot = false
|
||||
mode = "READ_WRITE"
|
||||
type = "PERSISTENT"
|
||||
}
|
||||
|
||||
metadata = {
|
||||
kube-env = var.kube_env
|
||||
constellation-init-secret-hash = var.init_secret_hash
|
||||
serial-port-enable = var.debug ? "TRUE" : "FALSE"
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = var.network
|
||||
subnetwork = var.subnetwork
|
||||
alias_ip_range {
|
||||
ip_cidr_range = "/24"
|
||||
subnetwork_range_name = var.alias_ip_range_name
|
||||
}
|
||||
}
|
||||
|
||||
scheduling {
|
||||
on_host_maintenance = "TERMINATE"
|
||||
}
|
||||
|
||||
service_account {
|
||||
scopes = [
|
||||
"https://www.googleapis.com/auth/compute",
|
||||
"https://www.googleapis.com/auth/servicecontrol",
|
||||
"https://www.googleapis.com/auth/service.management.readonly",
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/logging.write",
|
||||
"https://www.googleapis.com/auth/monitoring.write",
|
||||
"https://www.googleapis.com/auth/trace.append",
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
]
|
||||
}
|
||||
|
||||
shielded_instance_config {
|
||||
enable_secure_boot = false
|
||||
enable_vtpm = true
|
||||
enable_integrity_monitoring = true
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
ignore_changes = [
|
||||
name, # required. legacy instance templates used different naming scheme
|
||||
tags,
|
||||
labels,
|
||||
disk, # required. update procedure modifies the instance template externally
|
||||
metadata,
|
||||
network_interface,
|
||||
scheduling,
|
||||
service_account,
|
||||
shielded_instance_config,
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_instance_group_manager" "instance_group_manager" {
|
||||
provider = google-beta
|
||||
name = local.name
|
||||
description = "Instance group manager for Constellation"
|
||||
base_instance_name = local.name
|
||||
zone = var.zone
|
||||
target_size = var.initial_count
|
||||
|
||||
dynamic "stateful_disk" {
|
||||
for_each = var.role == "control-plane" ? [1] : []
|
||||
content {
|
||||
device_name = local.state_disk_name
|
||||
delete_rule = "ON_PERMANENT_INSTANCE_DELETION"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "stateful_internal_ip" {
|
||||
for_each = var.role == "control-plane" ? [1] : []
|
||||
content {
|
||||
interface_name = "nic0"
|
||||
delete_rule = "ON_PERMANENT_INSTANCE_DELETION"
|
||||
}
|
||||
}
|
||||
|
||||
version {
|
||||
instance_template = google_compute_instance_template.template.id
|
||||
}
|
||||
|
||||
dynamic "named_port" {
|
||||
for_each = toset(var.named_ports)
|
||||
content {
|
||||
name = named_port.value.name
|
||||
port = named_port.value.port
|
||||
}
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
ignore_changes = [
|
||||
name, # required. legacy instance templates used different naming scheme
|
||||
base_instance_name, # required. legacy instance templates used different naming scheme
|
||||
target_size, # required. autoscaling modifies the instance count externally
|
||||
version, # required. update procedure modifies the instance template externally
|
||||
]
|
||||
}
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
output "instance_group" {
|
||||
value = google_compute_instance_group_manager.instance_group_manager.instance_group
|
||||
}
|
|
@ -1,101 +0,0 @@
|
|||
variable "base_name" {
|
||||
type = string
|
||||
description = "Base name of the instance group."
|
||||
}
|
||||
|
||||
variable "node_group_name" {
|
||||
type = string
|
||||
description = "Constellation name for the node group (used for configuration and CSP-independent naming)."
|
||||
}
|
||||
|
||||
variable "role" {
|
||||
type = string
|
||||
description = "The role of the instance group."
|
||||
validation {
|
||||
condition = contains(["control-plane", "worker"], var.role)
|
||||
error_message = "The role has to be 'control-plane' or 'worker'."
|
||||
}
|
||||
}
|
||||
|
||||
variable "uid" {
|
||||
type = string
|
||||
description = "UID of the cluster. This is used for tags."
|
||||
}
|
||||
|
||||
variable "labels" {
|
||||
type = map(string)
|
||||
default = {}
|
||||
description = "Labels to apply to the instance group."
|
||||
}
|
||||
|
||||
variable "instance_type" {
|
||||
type = string
|
||||
description = "Instance type for the nodes."
|
||||
}
|
||||
|
||||
variable "initial_count" {
|
||||
type = number
|
||||
description = "Number of instances in the instance group."
|
||||
}
|
||||
|
||||
variable "image_id" {
|
||||
type = string
|
||||
description = "Image ID for the nodes."
|
||||
}
|
||||
|
||||
variable "disk_size" {
|
||||
type = number
|
||||
description = "Disk size for the nodes, in GB."
|
||||
}
|
||||
|
||||
variable "disk_type" {
|
||||
type = string
|
||||
description = "Disk type for the nodes. Has to be 'pd-standard' or 'pd-ssd'."
|
||||
}
|
||||
|
||||
variable "network" {
|
||||
type = string
|
||||
description = "Name of the network to use."
|
||||
}
|
||||
|
||||
variable "subnetwork" {
|
||||
type = string
|
||||
description = "Name of the subnetwork to use."
|
||||
}
|
||||
|
||||
variable "kube_env" {
|
||||
type = string
|
||||
description = "Kubernetes env."
|
||||
}
|
||||
|
||||
variable "init_secret_hash" {
|
||||
type = string
|
||||
description = "Hash of the init secret."
|
||||
}
|
||||
|
||||
variable "named_ports" {
|
||||
type = list(object({ name = string, port = number }))
|
||||
default = []
|
||||
description = "Named ports for the instance group."
|
||||
}
|
||||
|
||||
variable "debug" {
|
||||
type = bool
|
||||
default = false
|
||||
description = "Enable debug mode. This will enable serial port access on the instances."
|
||||
}
|
||||
|
||||
variable "alias_ip_range_name" {
|
||||
type = string
|
||||
description = "Name of the alias IP range to use."
|
||||
}
|
||||
|
||||
variable "zone" {
|
||||
type = string
|
||||
description = "Zone to deploy the instance group in."
|
||||
}
|
||||
|
||||
variable "custom_endpoint" {
|
||||
type = string
|
||||
description = "Custom endpoint to use for the Kubernetes apiserver. If not set, the default endpoint will be used."
|
||||
}
|
|
@ -1,72 +0,0 @@
|
|||
terraform {
|
||||
required_providers {
|
||||
google = {
|
||||
source = "hashicorp/google"
|
||||
version = "4.83.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
locals {
|
||||
name = "${var.name}-${var.backend_port_name}"
|
||||
}
|
||||
|
||||
resource "google_compute_region_health_check" "health" {
|
||||
name = local.name
|
||||
region = var.region
|
||||
check_interval_sec = 1
|
||||
timeout_sec = 1
|
||||
|
||||
dynamic "tcp_health_check" {
|
||||
for_each = var.health_check == "TCP" ? [1] : []
|
||||
content {
|
||||
port = var.port
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "https_health_check" {
|
||||
for_each = var.health_check == "HTTPS" ? [1] : []
|
||||
content {
|
||||
host = ""
|
||||
port = var.port
|
||||
request_path = "/readyz"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_region_backend_service" "backend" {
|
||||
name = local.name
|
||||
protocol = "TCP"
|
||||
load_balancing_scheme = "INTERNAL_MANAGED"
|
||||
health_checks = [google_compute_region_health_check.health.id]
|
||||
port_name = var.backend_port_name
|
||||
timeout_sec = 240
|
||||
region = var.region
|
||||
|
||||
backend {
|
||||
group = var.backend_instance_group
|
||||
balancing_mode = "UTILIZATION"
|
||||
capacity_scaler = 1.0
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_region_target_tcp_proxy" "proxy" {
|
||||
name = local.name
|
||||
region = var.region
|
||||
backend_service = google_compute_region_backend_service.backend.id
|
||||
}
|
||||
|
||||
# forwarding rule
|
||||
resource "google_compute_forwarding_rule" "forwarding" {
|
||||
name = local.name
|
||||
network = var.network
|
||||
subnetwork = var.backend_subnet
|
||||
region = var.region
|
||||
ip_address = var.ip_address
|
||||
ip_protocol = "TCP"
|
||||
load_balancing_scheme = "INTERNAL_MANAGED"
|
||||
port_range = var.port
|
||||
allow_global_access = true
|
||||
target = google_compute_region_target_tcp_proxy.proxy.id
|
||||
labels = var.frontend_labels
|
||||
}
|
|
@ -1,54 +0,0 @@
|
|||
# Input variables for the internal load balancer module.

variable "name" {
  type        = string
  description = "Base name of the load balancer."
}

variable "region" {
  type        = string
  description = "The region where the load balancer will be created."
}

variable "network" {
  type        = string
  description = "The network to which all network resources will be attached."
}

variable "backend_subnet" {
  type        = string
  description = "The subnet to which all backend network resources will be attached."
}

variable "health_check" {
  type        = string
  description = "The type of the health check. 'HTTPS' or 'TCP'."
  # Rejects anything other than the two supported probe types at plan time.
  validation {
    condition     = contains(["HTTPS", "TCP"], var.health_check)
    error_message = "Health check must be either 'HTTPS' or 'TCP'."
  }
}

variable "port" {
  type        = string
  description = "The port on which to listen for incoming traffic."
}

variable "backend_port_name" {
  type        = string
  description = "Name of backend port. The same name should appear in the instance groups referenced by this service."
}

variable "backend_instance_group" {
  type        = string
  description = "The URL of the instance group resource from which the load balancer will direct traffic."
}

variable "ip_address" {
  type        = string
  description = "The IP address that this forwarding rule serves."
}

variable "frontend_labels" {
  type        = map(string)
  default     = {}
  description = "Labels to apply to the forwarding rule."
}
|
|
@ -1,73 +0,0 @@
|
|||
terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "4.83.0"
    }

    google-beta = {
      source  = "hashicorp/google-beta"
      version = "4.83.0"
    }
  }
}

# Latest Ubuntu 22.04 LTS image, used as the jump host OS.
data "google_compute_image" "image_ubuntu" {
  family  = "ubuntu-2204-lts"
  project = "ubuntu-os-cloud"
}

# Jump host that forwards the given ports to an internal load balancer,
# making an internal-only cluster reachable from outside the VPC.
resource "google_compute_instance" "vm_instance" {
  name         = "${var.base_name}-jumphost"
  machine_type = "n2d-standard-4"
  zone         = var.zone

  boot_disk {
    initialize_params {
      image = data.google_compute_image.image_ubuntu.self_link
    }
  }

  network_interface {
    subnetwork = var.subnetwork
    # Empty access_config block allocates an ephemeral public IP.
    access_config {
    }
  }

  service_account {
    scopes = ["compute-ro"]
  }

  labels = var.labels

  metadata = {
    serial-port-enable = "TRUE"
  }

  # Startup script sets up DNAT/SNAT rules so that traffic hitting the jump
  # host on each of var.ports is forwarded to the load balancer.
  # `$${...}` escapes shell variables from Terraform template interpolation.
  #
  # Fix: the script resolves var.lb_internal_ip to a literal IP (DNS lookup
  # fallback) into `lb_ip`, but the iptables rules previously used the raw
  # var.lb_internal_ip, so the resolved address was never used. The rules now
  # use the resolved $${lb_ip}.
  metadata_startup_script = <<EOF
#!/bin/bash
set -x

# Uncomment to create user with password
# useradd -m user
# usermod -aG sudo user
# usermod --shell /bin/bash user
# sh -c "echo \"user:pass\" | chpasswd"

sysctl -w net.ipv4.ip_forward=1
sysctl -p

internal_ip=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+')

lb_ip=${var.lb_internal_ip}
if [[ ! $${lb_ip} =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
  lb_ip=$(dig +short ${var.lb_internal_ip})
fi
%{for port in var.ports~}
iptables -t nat -A PREROUTING -p tcp --dport ${port} -j DNAT --to-destination $${lb_ip}:${port}
iptables -t nat -A POSTROUTING -p tcp -d $${lb_ip} --dport ${port} -j SNAT --to-source $${internal_ip}
%{endfor~}
EOF

}
|
|
@ -1,3 +0,0 @@
|
|||
# Public (NAT) IP of the jump host's first network interface.
output "ip" {
  value = google_compute_instance.vm_instance.network_interface[0].access_config[0].nat_ip
}
|
|
@ -1,30 +0,0 @@
|
|||
# Input variables for the jump host module.

variable "base_name" {
  type        = string
  description = "Base name of the instance group."
}

variable "labels" {
  type        = map(string)
  default     = {}
  description = "Labels to apply to the instance group."
}

variable "subnetwork" {
  type        = string
  description = "Name of the subnetwork to use."
}

variable "zone" {
  type        = string
  description = "Zone to deploy the instance group in."
}

variable "lb_internal_ip" {
  type        = string
  description = "Internal IP of the load balancer."
}

variable "ports" {
  type        = list(number)
  description = "Ports to forward to the load balancer."
}
|
|
@ -1,66 +0,0 @@
|
|||
terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "4.83.0"
    }
  }
}

locals {
  # All resources of this module share one name, derived from the base name
  # and the backend port name.
  name = "${var.name}-${var.backend_port_name}"
}

# Global health check probing the backends either via plain TCP or via HTTPS
# on /readyz, selected by var.health_check.
resource "google_compute_health_check" "health" {
  name               = local.name
  check_interval_sec = 1
  timeout_sec        = 1

  # Exactly one of the two dynamic blocks below renders.
  dynamic "tcp_health_check" {
    for_each = var.health_check == "TCP" ? [1] : []
    content {
      port = var.port
    }
  }

  dynamic "https_health_check" {
    for_each = var.health_check == "HTTPS" ? [1] : []
    content {
      host         = ""
      port         = var.port
      request_path = "/readyz"
    }
  }
}

# Global backend service of the external TCP load balancer; unlike the
# internal variant this accepts multiple instance groups.
resource "google_compute_backend_service" "backend" {
  name                  = local.name
  protocol              = "TCP"
  load_balancing_scheme = "EXTERNAL"
  health_checks         = [google_compute_health_check.health.self_link]
  port_name             = var.backend_port_name
  timeout_sec           = 240

  # One backend block per instance group.
  dynamic "backend" {
    for_each = var.backend_instance_groups
    content {
      group          = backend.value
      balancing_mode = "UTILIZATION"
    }
  }
}

# TCP proxy between the forwarding rule and the backend service.
resource "google_compute_target_tcp_proxy" "proxy" {
  name            = local.name
  backend_service = google_compute_backend_service.backend.self_link
}

# Global forwarding rule: the public frontend of the load balancer.
resource "google_compute_global_forwarding_rule" "forwarding" {
  name                  = local.name
  ip_address            = var.ip_address
  ip_protocol           = "TCP"
  load_balancing_scheme = "EXTERNAL"
  port_range            = var.port
  target                = google_compute_target_tcp_proxy.proxy.self_link
  labels                = var.frontend_labels
}
|
|
@ -1,35 +0,0 @@
|
|||
# Input variables for the external load balancer module.

variable "name" {
  type        = string
  description = "Base name of the load balancer."
}

variable "health_check" {
  type        = string
  description = "The type of the health check. 'HTTPS' or 'TCP'."
  # Added for consistency with the internal load balancer module, which
  # validates this value; anything else would silently produce a health
  # check with neither probe block.
  validation {
    condition     = contains(["HTTPS", "TCP"], var.health_check)
    error_message = "Health check must be either 'HTTPS' or 'TCP'."
  }
}

variable "backend_port_name" {
  type        = string
  description = "Name of backend port. The same name should appear in the instance groups referenced by this service."
}

variable "backend_instance_groups" {
  type        = list(string)
  description = "The URLs of the instance group resources from which the load balancer will direct traffic."
}

variable "ip_address" {
  type        = string
  description = "The IP address that this forwarding rule serves. An address can be specified either by a literal IP address or a reference to an existing Address resource."
}

variable "port" {
  type        = number
  description = "The port on which to listen for incoming traffic."
}

variable "frontend_labels" {
  type        = map(string)
  default     = {}
  description = "Labels to apply to the forwarding rule."
}
|
|
@ -1,45 +0,0 @@
|
|||
# Outputs of the GCP cluster module.

# Endpoint used by clients from outside the cluster (e.g. the CLI).
output "out_of_cluster_endpoint" {
  value = local.out_of_cluster_endpoint
}

# Endpoint used by nodes inside the cluster.
output "in_cluster_endpoint" {
  value = local.in_cluster_endpoint
}

# All SANs for the Kubernetes API server certificate: both endpoints plus an
# optional custom endpoint, deduplicated and sorted for stable ordering.
output "api_server_cert_sans" {
  value = sort(
    distinct(
      concat(
        [
          local.in_cluster_endpoint,
          local.out_of_cluster_endpoint,
        ],
        var.custom_endpoint == "" ? [] : [var.custom_endpoint],
      )
    )
  )
}

# Unique identifier of this cluster deployment.
output "uid" {
  value = local.uid
}

# Secret used to authenticate the first init call; marked sensitive so it is
# not printed in plan/apply output.
output "initSecret" {
  value     = random_password.initSecret.result
  sensitive = true
}

output "project" {
  value = var.project
}

# CIDR range of the node subnet.
output "ip_cidr_nodes" {
  value = local.cidr_vpc_subnet_nodes
}

# CIDR range of the pod subnet.
output "ip_cidr_pods" {
  value = local.cidr_vpc_subnet_pods
}

output "name" {
  value = local.name
}
|
|
@ -1,59 +0,0 @@
|
|||
# Input variables for the GCP cluster module.

variable "name" {
  type        = string
  default     = "constell"
  description = "Base name of the cluster."
}

variable "node_groups" {
  type = map(object({
    role          = string
    zone          = string
    instance_type = string
    disk_size     = number
    disk_type     = string
    initial_count = number
  }))
  description = "A map of node group names to node group configurations."
  validation {
    # Fix: the previous condition used can([for ...]) — can() only checks
    # that the expression evaluates without error, and a list of booleans
    # always does, so invalid roles were never rejected. alltrue() actually
    # enforces that every group's role is one of the allowed values.
    condition     = alltrue([for group in var.node_groups : contains(["control-plane", "worker"], group.role)])
    error_message = "The role has to be 'control-plane' or 'worker'."
  }
}

variable "project" {
  type        = string
  description = "The GCP project to deploy the cluster in."
}

variable "region" {
  type        = string
  description = "The GCP region to deploy the cluster in."
}

variable "zone" {
  type        = string
  description = "The GCP zone to deploy the cluster in."
}

variable "image_id" {
  type        = string
  description = "The GCP image to use for the cluster nodes."
}

variable "debug" {
  type        = bool
  default     = false
  description = "Enable debug mode. This opens up a debugd port that can be used to deploy a custom bootstrapper."
}

variable "custom_endpoint" {
  type        = string
  default     = ""
  description = "Custom endpoint to use for the Kubernetes apiserver. If not set, the default endpoint will be used."
}

variable "internal_load_balancer" {
  type        = bool
  default     = false
  description = "Enable internal load balancer. This can only be enabled if the control-plane is deployed in one zone."
}
|
|
@ -1,61 +0,0 @@
|
|||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/aws" {
|
||||
version = "5.17.0"
|
||||
constraints = "5.17.0"
|
||||
hashes = [
|
||||
"h1:+riTtJ8Tqjd6js1SGim+926BtDuxy8Jn4F+xV8LXvvg=",
|
||||
"h1:7XJ6tsfZR1m2RTHkJHmp7FtNxz8JP5Y/7p89RPebcAY=",
|
||||
"h1:A/Z75kGeHrP3euYJv1OaGfTVy63NXIeUj1YBHg4TdO0=",
|
||||
"h1:GVzgP42qi2UlVUyPqizWhFyaA9SpxhmAnPqwLKVxBqM=",
|
||||
"h1:IOvWK6rZ2e8AubIWAfKzqI+9AcG+QNPcMOZlujhO840=",
|
||||
"h1:OJMhYliR4PFDrTtOPocwq4NfuYZVGmmxwInPmcIC1x0=",
|
||||
"h1:U+EDfeUqefebA1h7KyBMD1xH0h311LMi7wijPDPkC/0=",
|
||||
"h1:WxtQKHotfGqgEJrV3Flb0CWziKxDfOz2RZUAIT09Uss=",
|
||||
"h1:fKgoYBRyK55vJSChUHPptDUQuXqjfDjVKVJ11+scq64=",
|
||||
"h1:lTrdAde+ANuM0Cn+RLFE0sOl2iFoWK9It0dIqi+xkv8=",
|
||||
"h1:pHssdSGtZ9R9lk3IfQIy20SpFjjZdBzvybDsT/y2cQA=",
|
||||
"h1:pcDQYPgf/6OQCapDcRV/RTvToi9qXHFsY16rvZx3vsY=",
|
||||
"h1:rplvK7UGP2FuzM44t2eRX+QYYPC0aUIoKdi5XayRI8M=",
|
||||
"h1:ytz93JU2mhkKFYWj0V5TV5GMH0v6cIekk485rn9me6A=",
|
||||
"zh:0087b9dd2c9c638fd63e527e5b9b70988008e263d480a199f180efe5a4f070f0",
|
||||
"zh:0fd532a4fd03ddef11f0502ff9fe4343443e1ae805cb088825a71d6d48906ec7",
|
||||
"zh:16411e731100cd15f7e165f53c23be784b2c86c2fcfd34781e0642d17090d342",
|
||||
"zh:251d520927e77f091e2ec6302e921d839a2430ac541c6a461aed7c08fb5eae12",
|
||||
"zh:4919e69682dc2a8c32d44f6ebc038a52c9f40af9c61cb574b64e322800d6a794",
|
||||
"zh:5334c60759d5f76bdc51355d1a3ebcc451d4d20f632f5c73b6e55c52b5dc9e52",
|
||||
"zh:7341a2b7247572eba0d0486094a870b872967702ec0ac7af728c2df2c30af4e5",
|
||||
"zh:81d1b1cb2cac6b3922a05adab69543b678f344a01debd54500263700dad7a288",
|
||||
"zh:882bc8e15ef6d4020a07321ec4c056977c5c1d96934118032922561d29504d43",
|
||||
"zh:8cd4871ef2b03fd916de1a6dc7eb8a81a354c421177d4334a2e3308e50215e41",
|
||||
"zh:97e12fe6529b21298adf1046c5e20ac35d0569c836a6f385ff041e257e00cfd2",
|
||||
"zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
|
||||
"zh:9f5baf5d59b9f3cf5504d1fa975f10f27da3791896a9e18ece47c258bac17634",
|
||||
"zh:dffafba6731ac1db1c540bdbd6a8c878486b71de9d0ca1d23c5c00a6c3c14d80",
|
||||
"zh:fa7440c3c15a42fc5731444d324ced75407d417bfe3184661ae47d40a9718dce",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/random" {
|
||||
version = "3.5.1"
|
||||
hashes = [
|
||||
"h1:3hjTP5tQBspPcFAJlfafnWrNrKnr7J4Cp0qB9jbqf30=",
|
||||
"h1:6FVyQ/aG6tawPam6B+oFjgdidKd83uG9n7dOSQ66HBA=",
|
||||
"h1:IL9mSatmwov+e0+++YX2V6uel+dV6bn+fC/cnGDK3Ck=",
|
||||
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
|
||||
"h1:sZ7MTSD4FLekNN2wSNFGpM+5slfvpm5A/NLVZiB7CO0=",
|
||||
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
|
||||
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
|
||||
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
|
||||
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
|
||||
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
|
||||
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
|
||||
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
|
||||
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
|
||||
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
|
||||
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
|
||||
]
|
||||
}
|
|
@ -1,24 +0,0 @@
|
|||
# IAM instance profiles for AWS
|
||||
|
This Terraform configuration creates the IAM instance profiles that must be attached to Constellation nodes.
||||
|
||||
You can create the profiles with the following commands:
|
||||
|
||||
```sh
|
||||
mkdir constellation_aws_iam
|
||||
cd constellation_aws_iam
|
||||
curl --remote-name-all https://raw.githubusercontent.com/edgelesssys/constellation/main/hack/terraform/aws/iam/{main,output,variables}.tf
|
||||
terraform init
|
||||
terraform apply -auto-approve -var name_prefix=my_constellation
|
||||
```
|
||||
|
||||
You can either get the profile names from the Terraform output values `control_plane_instance_profile` and `worker_nodes_instance_profile` and manually add them to your Constellation configuration file.
|
||||
|
||||
Or you can do this with a `yq` command:
|
||||
|
||||
```sh
|
||||
yq -i "
|
||||
.provider.aws.iamProfileControlPlane = $(terraform output control_plane_instance_profile) |
|
||||
.provider.aws.iamProfileWorkerNodes = $(terraform output worker_nodes_instance_profile)
|
||||
" path/to/constellation-conf.yaml
|
||||
```
|
|
@ -1,245 +0,0 @@
|
|||
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "5.17.0"
    }
  }
}

# Configure the AWS Provider
provider "aws" {
  region = var.region
}

# NOTE(review): this random_id is not referenced anywhere in this file.
# Kept to avoid destroying existing state; confirm before removing.
resource "random_id" "uid" {
  byte_length = 8
}

# --- Control plane ---

# Instance profile attached to control-plane nodes.
resource "aws_iam_instance_profile" "control_plane_instance_profile" {
  name = "${var.name_prefix}_control_plane_instance_profile"
  role = aws_iam_role.control_plane_role.name
}

# Role assumable by EC2 instances, carried by the control-plane profile.
resource "aws_iam_role" "control_plane_role" {
  name = "${var.name_prefix}_control_plane_role"
  path = "/"

  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": "sts:AssumeRole",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Effect": "Allow",
      "Sid": ""
    }
  ]
}
EOF
}

# Permissions for control-plane nodes (load balancing, autoscaling, EC2,
# logging, KMS lookup).
# Fix: removed duplicate "elasticloadbalancing:AddTags" and
# "elasticloadbalancing:DescribeListeners" entries from the action list.
resource "aws_iam_policy" "control_plane_policy" {
  name   = "${var.name_prefix}_control_plane_policy"
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "elasticloadbalancing:DescribeTargetGroupAttributes",
        "elasticloadbalancing:DescribeRules",
        "shield:GetSubscriptionState",
        "elasticloadbalancing:DescribeListeners",
        "elasticloadbalancing:ModifyTargetGroupAttributes",
        "elasticloadbalancing:DescribeTags",
        "autoscaling:DescribeAutoScalingGroups",
        "autoscaling:DescribeLaunchConfigurations",
        "autoscaling:DescribeTags",
        "ec2:AttachVolume",
        "ec2:AuthorizeSecurityGroupIngress",
        "ec2:CreateRoute",
        "ec2:CreateSecurityGroup",
        "ec2:CreateTags",
        "ec2:CreateVolume",
        "ec2:DeleteRoute",
        "ec2:DeleteSecurityGroup",
        "ec2:DeleteVolume",
        "ec2:DescribeAvailabilityZones",
        "ec2:DescribeImages",
        "ec2:DescribeInstances",
        "ec2:DescribeRegions",
        "ec2:DescribeRouteTables",
        "ec2:DescribeSecurityGroups",
        "ec2:DescribeSubnets",
        "ec2:DescribeVolumes",
        "ec2:DescribeVpcs",
        "ec2:DetachVolume",
        "ec2:ModifyInstanceAttribute",
        "ec2:ModifyVolume",
        "ec2:RevokeSecurityGroupIngress",
        "elasticloadbalancing:AddTags",
        "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
        "elasticloadbalancing:AttachLoadBalancerToSubnets",
        "elasticloadbalancing:ConfigureHealthCheck",
        "elasticloadbalancing:CreateListener",
        "elasticloadbalancing:CreateLoadBalancer",
        "elasticloadbalancing:CreateLoadBalancerListeners",
        "elasticloadbalancing:CreateLoadBalancerPolicy",
        "elasticloadbalancing:CreateTargetGroup",
        "elasticloadbalancing:DeleteListener",
        "elasticloadbalancing:DeleteLoadBalancer",
        "elasticloadbalancing:DeleteLoadBalancerListeners",
        "elasticloadbalancing:DeleteTargetGroup",
        "elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
        "elasticloadbalancing:DeregisterTargets",
        "elasticloadbalancing:DescribeLoadBalancerAttributes",
        "elasticloadbalancing:DescribeLoadBalancerPolicies",
        "elasticloadbalancing:DescribeLoadBalancers",
        "elasticloadbalancing:DescribeTargetGroups",
        "elasticloadbalancing:DescribeTargetHealth",
        "elasticloadbalancing:DetachLoadBalancerFromSubnets",
        "elasticloadbalancing:ModifyListener",
        "elasticloadbalancing:ModifyLoadBalancerAttributes",
        "elasticloadbalancing:ModifyTargetGroup",
        "elasticloadbalancing:RegisterInstancesWithLoadBalancer",
        "elasticloadbalancing:RegisterTargets",
        "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
        "elasticloadbalancing:SetLoadBalancerPoliciesOfListener",
        "iam:CreateServiceLinkedRole",
        "kms:DescribeKey",
        "logs:CreateLogStream",
        "logs:DescribeLogGroups",
        "logs:ListTagsLogGroup",
        "logs:PutLogEvents",
        "tag:GetResources",
        "ec2:DescribeLaunchTemplateVersions",
        "autoscaling:SetDesiredCapacity",
        "autoscaling:TerminateInstanceInAutoScalingGroup",
        "ec2:DescribeInstanceStatus",
        "ec2:CreateLaunchTemplateVersion",
        "ec2:ModifyLaunchTemplate"
      ],
      "Resource": [
        "*"
      ]
    }
  ]
}
EOF
}

resource "aws_iam_role_policy_attachment" "attach_control_plane_policy" {
  role       = aws_iam_role.control_plane_role.name
  policy_arn = aws_iam_policy.control_plane_policy.arn
}

# --- Worker nodes ---

# Instance profile attached to worker nodes.
resource "aws_iam_instance_profile" "worker_node_instance_profile" {
  name = "${var.name_prefix}_worker_node_instance_profile"
  role = aws_iam_role.worker_node_role.name
}

# Role assumable by EC2 instances, carried by the worker-node profile.
resource "aws_iam_role" "worker_node_role" {
  name = "${var.name_prefix}_worker_node_role"
  path = "/"

  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": "sts:AssumeRole",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Effect": "Allow",
      "Sid": ""
    }
  ]
}
EOF
}

# Read-only permissions for worker nodes (EC2 describe, ECR image pull,
# logging).
resource "aws_iam_policy" "worker_node_policy" {
  name   = "${var.name_prefix}_worker_node_policy"
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "ec2:DescribeImages",
        "ec2:DescribeInstances",
        "ec2:DescribeRegions",
        "ecr:BatchCheckLayerAvailability",
        "ecr:BatchGetImage",
        "ecr:DescribeRepositories",
        "ecr:GetAuthorizationToken",
        "ecr:GetDownloadUrlForLayer",
        "ecr:GetRepositoryPolicy",
        "ecr:ListImages",
        "logs:CreateLogStream",
        "logs:DescribeLogGroups",
        "logs:ListTagsLogGroup",
        "logs:PutLogEvents",
        "tag:GetResources"
      ],
      "Resource": "*"
    }
  ]
}
EOF
}

resource "aws_iam_role_policy_attachment" "attach_worker_node_policy" {
  role       = aws_iam_role.worker_node_role.name
  policy_arn = aws_iam_policy.worker_node_policy.arn
}

// Add all permissions here, which are needed by the bootstrapper
resource "aws_iam_policy" "constellation_bootstrapper_policy" {
  name   = "${var.name_prefix}_constellation_bootstrapper_policy"
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "elasticloadbalancing:DescribeLoadBalancers"
      ],
      "Resource": "*"
    }
  ]
}
EOF
}

# The bootstrapper policy is needed on both node roles.
resource "aws_iam_role_policy_attachment" "attach_bootstrapper_policy_worker" {
  role       = aws_iam_role.worker_node_role.name
  policy_arn = aws_iam_policy.constellation_bootstrapper_policy.arn
}

resource "aws_iam_role_policy_attachment" "attach_bootstrapper_policy_control_plane" {
  role       = aws_iam_role.control_plane_role.name
  policy_arn = aws_iam_policy.constellation_bootstrapper_policy.arn
}

// TODO(msanft): incorporate this into the custom worker node policy
resource "aws_iam_role_policy_attachment" "csi_driver_policy_worker" {
  role       = aws_iam_role.worker_node_role.name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
}

// TODO(msanft): incorporate this into the custom control-plane node policy
resource "aws_iam_role_policy_attachment" "csi_driver_policy_control_plane" {
  role       = aws_iam_role.control_plane_role.name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
}
|
|
@ -1,7 +0,0 @@
|
|||
# Profile names to be entered into the Constellation configuration
# (provider.aws.iamProfileControlPlane / iamProfileWorkerNodes).

output "control_plane_instance_profile" {
  value = aws_iam_instance_profile.control_plane_instance_profile.name
}

output "worker_nodes_instance_profile" {
  value = aws_iam_instance_profile.worker_node_instance_profile.name
}
}
|
|
@ -1,10 +0,0 @@
|
|||
# Input variables for the AWS IAM configuration.

variable "name_prefix" {
  type        = string
  description = "Prefix for all resources"
}

variable "region" {
  type        = string
  description = "AWS region"
  default     = "us-east-2"
}
}
|
|
@ -1,62 +0,0 @@
|
|||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/azuread" {
|
||||
version = "2.43.0"
|
||||
constraints = "2.43.0"
|
||||
hashes = [
|
||||
"h1:/wPCaaEC7By9zWMxYmPMiLNu+zuYuFUyl5mkhCwwi8w=",
|
||||
"h1:393kftLqVFxsI3iuhPI5sI2pXh6q6I/14BsYcU+5FYQ=",
|
||||
"h1:52dVXjGwnVZQB9CiqCZ2wY/rM2ammozM8zPYx0uDTi0=",
|
||||
"h1:83hGDTWccRJXsKGg1VeYJkBeHwE2cCTRKFCZud4iWQo=",
|
||||
"h1:M9osLDURE2DYjHbvScgvuDzz5vv76Oc0dsDGVcy7aZE=",
|
||||
"h1:OJ9UCrtYFlkKmXKfN4fKF9CEysslKk42VztnOFrl/BY=",
|
||||
"h1:WrNRFJa759DoGy1zjf7a96OLWVKyicVsk93ZrQCO2nc=",
|
||||
"h1:bp9HeofaEJDiWtyLMwIEYVgxP5yoMs/dQhjCYsbXU34=",
|
||||
"h1:jmvCGhwc+jUip0Hy4PI1ZiO/11vdQ3TTp3YaBTKFGiQ=",
|
||||
"h1:tB16uWp5AfxDeFJQQfIGMvLjpqAprDT0tvLX0VyS51M=",
|
||||
"h1:tU/kGFohqNia+uVFT1ujYKZRH2lvEP73LUhQDJtO1w4=",
|
||||
"zh:1c3e89cf19118fc07d7b04257251fc9897e722c16e0a0df7b07fcd261f8c12e7",
|
||||
"zh:2225e2e97ccba4ed1d84f1d430f1ebd837943fe187e57f24f1763172dda61556",
|
||||
"zh:24708cb09411a766ff397e05cae49058ca38edc718db303a7faef9823402737d",
|
||||
"zh:3a61167ff58d585abd56233731a8fd649c7c04272bd5b878f963883496e19192",
|
||||
"zh:433f557634b5e663caaeb68c504c7771c186eba7ecf5d4030437956bc6599ecb",
|
||||
"zh:5e8cc3b3bcc22d217cf588c821ce091c7d40f0815aecc1addde5355c17cb381d",
|
||||
"zh:7b008c376097cd60259d43f58fcb33fee56fe9aebb4a94ed7958868ee501d7d0",
|
||||
"zh:908907fd38537583ea60dccbf73055ae1a2963acc399be4f8e9a6616a9a537db",
|
||||
"zh:966586cfd850606bab7dd2242c5b9e35d3a7178f64eaac0b44dea54c104c8169",
|
||||
"zh:a624286401913d3ec44b4825e2c5ae38ac94fb4950aeed8f4b91d09c898f8cce",
|
||||
"zh:b5171a4463fd0d9b0ce2a08605499b6d99fe93d6fc3f4143e9a26201065cc90a",
|
||||
"zh:cdcfeeb9db4dbdc6f1fb5644453b37dbd0025b4f3127e9ff348f1e62d66b493e",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/azurerm" {
|
||||
version = "3.74.0"
|
||||
constraints = "3.74.0"
|
||||
hashes = [
|
||||
"h1:1kSiowd/tBNswp3iv7ePlzkP5llWihjHcY3pdXdJqVU=",
|
||||
"h1:4b15khHtc5OkIVEFg0W5QRwf/ov1WVQkXVdSiAcTCS8=",
|
||||
"h1:ETVZfmulZQ435+lgFCkZRpfVOLyAxfDOwbPXFg3aLLQ=",
|
||||
"h1:H3diAufZ5VDQKsQNYykVRaFTOUJ4gjFiT2VLYi574+w=",
|
||||
"h1:LEdK8BxNSNiBQbtcJhQZKMMHDjmPpUsvDpr3Mzs93Tg=",
|
||||
"h1:OtJKZcMwrRNR84ylT1GgMwGR8KTxVOCkNifbjABlGj0=",
|
||||
"h1:Rq+CNb+4u47dw20tlAeI2yxSOuDtLm+S/GZO2pneLyA=",
|
||||
"h1:VfBB00BE0wvFiod7BlL+Cn6r2599MEi94hnAQ277ux8=",
|
||||
"h1:YJ15rwD0G7lYc9OVh5GO4VTqcd2jhqegfgyqTJH1M/I=",
|
||||
"h1:YvxxiqiwXjZdU53u3b9q49ezsIAb59KmdLLFkwkwFAs=",
|
||||
"h1:xDRmcV40KrWttPYg/w0/IN/frS9K1twuyvqRNVZko44=",
|
||||
"zh:0424c70152f949da1ec52ba96d20e5fd32fd22d9bd9203ce045d5f6aab3d20fc",
|
||||
"zh:16dbf581d10f8e7937185bcdcceb4f91d08c919e452fb8da7580071288c8c397",
|
||||
"zh:3019103bc2c3b4e185f5c65696c349697644c968f5c085af5505fed6d01c4241",
|
||||
"zh:49bb56ebaed6653fdb913c2b2bb74fc8b5399e7258d1e89084f72c44ea1130dd",
|
||||
"zh:85547666517f899d88620bd23a000a8f43c7dc93587c350eb1ea17bcb3e645c7",
|
||||
"zh:8bed8b646ff1822d8764de68b56b71e5dd971a4b77eba80d47f400a530800bea",
|
||||
"zh:8bfa6c70c004ba05ebce47f74f49ce872c28a68a18bb71b281a9681bcbbdbfa1",
|
||||
"zh:a2ae9e38fda0695fb8aa810e4f1ce4b104bfda651a87923b307bb1728680d8b6",
|
||||
"zh:beac1efe32f99072c892095f5ff46e40d6852b66679a03bc3acbe1b90fb1f653",
|
||||
"zh:d8a6ca20e49ebe7ea5688d91233d571e2c2ccc3e41000c39a7d7031df209ea8e",
|
||||
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
||||
"zh:f937b5fdf49b072c0347408d0a1c5a5d822dae1a23252915930e5a82d1d8ce8b",
|
||||
]
|
||||
}
|
|
@ -1,32 +0,0 @@
|
|||
# Terraform Azure IAM creation
|
||||
|
||||
This terraform configuration creates the necessary Azure resources that need to be available to host a Constellation cluster.
|
||||
|
||||
You can create the resources with the following commands:
|
||||
|
||||
```sh
|
||||
mkdir constellation_azure_iam
|
||||
cd constellation_azure_iam
|
||||
curl --remote-name-all https://raw.githubusercontent.com/edgelesssys/constellation/main/hack/terraform/azure/iam/{main.tf,output.tf,variables.tf,.terraform.lock.hcl}
|
||||
terraform init
|
||||
terraform apply
|
||||
```
|
||||
|
||||
The following terraform output values are available (with their corresponding keys in the Constellation configuration file):
|
||||
|
||||
- `subscription_id` (subscription)
|
||||
- `tenant_id` (tenant)
|
||||
- `uami_id` (userAssignedIdentity)
|
||||
|
||||
You can either get the profile names from the Terraform output and manually add them to your Constellation configuration file according to our [Documentation](https://docs.edgeless.systems/constellation/getting-started/first-steps).
|
||||
Or you can do this with a `yq` command:
|
||||
|
||||
```sh
|
||||
yq -i "
|
||||
.provider.azure.subscription = $(terraform output subscription_id) |
|
||||
.provider.azure.tenant = $(terraform output tenant_id) |
|
.provider.azure.userAssignedIdentity = $(terraform output uami_id)
||||
" path/to/constellation-conf.yaml
|
||||
```
|
||||
|
||||
Where `path/to/constellation-conf.yaml` is the path to your Constellation configuration file.
|
|
@ -1,70 +0,0 @@
|
|||
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "3.74.0"
    }
    azuread = {
      source  = "hashicorp/azuread"
      version = "2.43.0"
    }
  }
}

# Configure Azure resource management provider
provider "azurerm" {
  features {
    resource_group {
      # Allow `terraform destroy` to remove the resource groups even if they
      # still contain resources created out of band.
      prevent_deletion_if_contains_resources = false
    }
  }
}

# Configure Azure active directory provider
provider "azuread" {
  tenant_id = data.azurerm_subscription.current.tenant_id
}

# Access current subscription (available via Azure CLI)
data "azurerm_subscription" "current" {}

# Access current AzureAD configuration
# NOTE(review): not referenced elsewhere in this file — confirm it is needed
# (e.g. by outputs) before removing.
data "azuread_client_config" "current" {}

# Create base resource group
resource "azurerm_resource_group" "base_resource_group" {
  name     = var.resource_group_name
  location = var.region
}

# Create identity resource group
resource "azurerm_resource_group" "identity_resource_group" {
  name     = "${var.resource_group_name}-identity"
  location = var.region
}

# Create managed identity
resource "azurerm_user_assigned_identity" "identity_uami" {
  location            = var.region
  name                = var.service_principal_name
  resource_group_name = azurerm_resource_group.identity_resource_group.name
}

# Assign roles to managed identity, scoped to the base resource group.
resource "azurerm_role_assignment" "virtual_machine_contributor_role" {
  scope                = azurerm_resource_group.base_resource_group.id
  role_definition_name = "Virtual Machine Contributor"
  principal_id         = azurerm_user_assigned_identity.identity_uami.principal_id
}

resource "azurerm_role_assignment" "application_insights_component_contributor_role" {
  scope                = azurerm_resource_group.base_resource_group.id
  role_definition_name = "Application Insights Component Contributor"
  principal_id         = azurerm_user_assigned_identity.identity_uami.principal_id
}

resource "azurerm_role_assignment" "uami_owner_role" {
  scope                = azurerm_resource_group.base_resource_group.id
  role_definition_name = "Owner"
  principal_id         = azurerm_user_assigned_identity.identity_uami.principal_id
}
|
|
@ -1,12 +0,0 @@
|
|||
# Values to be entered into the Constellation configuration
# (provider.azure.subscription / tenant / userAssignedIdentity).

output "subscription_id" {
  value = data.azurerm_subscription.current.subscription_id
}

output "tenant_id" {
  value = data.azurerm_subscription.current.tenant_id
}

output "uami_id" {
  description = "Outputs the id in the format: /$ID/resourceGroups/$RG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/$NAME. Not to be confused with the client_id"
  value       = azurerm_user_assigned_identity.identity_uami.id
}
}
|
|
@ -1,14 +0,0 @@
|
|||
# Input variables for the Azure IAM configuration.

variable "resource_group_name" {
  type        = string
  description = "Resource group name"
}

variable "service_principal_name" {
  type        = string
  description = "Service principal name"
}

variable "region" {
  type        = string
  description = "Azure resource location"
}
}
|
|
@ -1,55 +0,0 @@
|
|||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/google" {
|
||||
version = "4.83.0"
|
||||
constraints = "4.83.0"
|
||||
hashes = [
|
||||
"h1:04Dbo1eT5GovugyMTr78SetNLLXBVhzMeo67Noyu85o=",
|
||||
"h1:BOrMAGh1FwA290rqwOHJKdfYOhOyqcKiqunZ6K/qA6k=",
|
||||
"h1:QXESvZlpwchznnilwGfL5nwbYTNlJLl4RyV5TXjKZVY=",
|
||||
"h1:SmoEOGxSXmrWceJP4YVmpgdsnEk01OCZhwEUUViy0c0=",
|
||||
"h1:cWBKJt7QJ+MKerSq73qFICJkIsxHn1JepalZzR/eRk4=",
|
||||
"h1:dPId6xBo8+uET30DqkB400hKbMGR60NoxMkw1FFzvjA=",
|
||||
"h1:jvTOwFMz4iyq/4AjU6QjTOlL5R0etYt98tC7D/6eE1M=",
|
||||
"h1:lvCQfxljF0bY15qI78bxl9d1pW6o60WcyNp9ZQcx3DU=",
|
||||
"h1:nyeDdFmfYBFj3+Ng6IwfdSgo+D4fsCAbbTPmwPidQC8=",
|
||||
"h1:qx6znUIkV7pzjp1MgoLLUT+3hyv5zYbSdVho+JUUBKk=",
|
||||
"h1:x9rGt85+aTXPVhTtNJ4bdV5Wy3uJDJbVg+D0e0h/uiY=",
|
||||
"zh:0310360982c3d42449ef103fab0819770aa96c7813507778d71ed016942bed96",
|
||||
"zh:0d0f82ce5e54267641b1f1d494a3ad1ddd41a7553910dd33abd6a114feab6881",
|
||||
"zh:0eda79e53a1833e8692273f5d7224344200e49303e579aec7b53762f50f39210",
|
||||
"zh:3c0cf4abaf461238563132ab4564965bc6bd571eb3bbeedac89258a9a688b169",
|
||||
"zh:61d619e5163daeeb7909443cc0c67816939a1748aec2fe544ab3f380270aae92",
|
||||
"zh:66d9da66aec8575ee16b70b42a5ae082b2f43f4a84a844363a585806ac75cca0",
|
||||
"zh:875c5596f365130095ccc2150755b6fb8a6d9fe9af4af9f595029716be02cdef",
|
||||
"zh:a9af92cd6ea160618d6433c92297a4e3f3dc7a2e964516e1e7b51ce70f3ec178",
|
||||
"zh:b9566bd1910462b4d92c6976184c4408e42a3ef6a300962b49866aa0f6f29b11",
|
||||
"zh:bae735a81a04244893fd9e81d9b5d6c321d874cb37a7b5aab8a1c8c5044b362d",
|
||||
"zh:d97ae1676d793696498e0eda8324bc02edbd2fbbcd76eb103a949876ec1fe8c0",
|
||||
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/null" {
|
||||
version = "3.2.1"
|
||||
hashes = [
|
||||
"h1:FbGfc+muBsC17Ohy5g806iuI1hQc4SIexpYCrQHQd8w=",
|
||||
"h1:tSj1mL6OQ8ILGqR2mDu7OYYYWf+hoir0pf9KAQ8IzO8=",
|
||||
"h1:vUW21lLLsKlxtBf0QF7LKJreKxs0CM7YXGzqW1N/ODY=",
|
||||
"h1:wqgRvlyVIbkCeCQs+5jj6zVuQL0KDxZZtNofGqqlSdI=",
|
||||
"h1:ydA0/SNRVB1o95btfshvYsmxA+jZFRZcvKzZSB+4S1M=",
|
||||
"zh:58ed64389620cc7b82f01332e27723856422820cfd302e304b5f6c3436fb9840",
|
||||
"zh:62a5cc82c3b2ddef7ef3a6f2fedb7b9b3deff4ab7b414938b08e51d6e8be87cb",
|
||||
"zh:63cff4de03af983175a7e37e52d4bd89d990be256b16b5c7f919aff5ad485aa5",
|
||||
"zh:74cb22c6700e48486b7cabefa10b33b801dfcab56f1a6ac9b6624531f3d36ea3",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:79e553aff77f1cfa9012a2218b8238dd672ea5e1b2924775ac9ac24d2a75c238",
|
||||
"zh:a1e06ddda0b5ac48f7e7c7d59e1ab5a4073bbcf876c73c0299e4610ed53859dc",
|
||||
"zh:c37a97090f1a82222925d45d84483b2aa702ef7ab66532af6cbcfb567818b970",
|
||||
"zh:e4453fbebf90c53ca3323a92e7ca0f9961427d2f0ce0d2b65523cc04d5d999c2",
|
||||
"zh:e80a746921946d8b6761e77305b752ad188da60688cfd2059322875d363be5f5",
|
||||
"zh:fbdb892d9822ed0e4cb60f2fedbdbb556e4da0d88d3b942ae963ed6ff091e48f",
|
||||
"zh:fca01a623d90d0cad0843102f9b8b9fe0d3ff8244593bd817f126582b52dd694",
|
||||
]
|
||||
}
|
|
@ -1,36 +0,0 @@
|
|||
# IAM configuration for GCP
|
||||
|
||||
This terraform script creates the necessary GCP IAM configuration to be attached to Constellation nodes.
|
||||
|
||||
You can create the configuration with the following commands:
|
||||
|
||||
```sh
|
||||
mkdir constellation_gcp_iam
|
||||
cd constellation_gcp_iam
|
||||
curl --remote-name-all https://raw.githubusercontent.com/edgelesssys/constellation/main/cli/internal/terraform/terraform/iam/gcp/{main.tf,outputs.tf,variables.tf,.terraform.lock.hcl}
|
||||
terraform init
|
||||
terraform apply
|
||||
```
|
||||
|
||||
The following terraform output values are available (with their corresponding keys in the Constellation configuration file):
|
||||
|
||||
- `sa_key` - **Sensitive Value**
|
||||
- `region` (region)
|
||||
- `zone` (zone)
|
||||
- `project_id` (project)
|
||||
|
||||
You can either get the values from the Terraform output and manually add them to your Constellation configuration file according to our [Documentation](https://docs.edgeless.systems/constellation/getting-started/first-steps). (If you add the values manually, you need to base64-decode the `sa_key` value and place it in a JSON file, then specify the path to this file in the Constellation configuration file for the `serviceAccountKeyPath` key.)
|
||||
|
||||
Or you can set up the Constellation configuration file automatically with the following commands:
|
||||
|
||||
```sh
|
||||
terraform output sa_key | sed "s/\"//g" | base64 --decode | tee gcpServiceAccountKey.json
|
||||
yq -i "
|
||||
.provider.gcp.serviceAccountKeyPath = \"$(realpath gcpServiceAccountKey.json)\" |
|
||||
.provider.gcp.project = $(terraform output project_id) |
|
||||
.provider.gcp.region = $(terraform output region) |
|
||||
.provider.gcp.zone = $(terraform output zone)
|
||||
" path/to/constellation-conf.yaml
|
||||
```
|
||||
|
||||
Where `path/to/constellation-conf.yaml` is the path to your Constellation configuration file.
|
|
terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "4.83.0"
    }
  }
}

provider "google" {
  project = var.project_id
  region  = var.region
  zone    = var.zone
}

# Service account used inside Constellation clusters.
resource "google_service_account" "service_account" {
  account_id   = var.service_account_id
  display_name = "Constellation service account"
  description  = "Service account used inside Constellation"
}

// service_account creation is eventually consistent so add a delay to ensure it is created before the next step: https://registry.terraform.io/providers/hashicorp/google/4.69.1/docs/resources/google_service_account.html
resource "null_resource" "delay" {
  provisioner "local-exec" {
    # NOTE(review): "sleep" assumes a POSIX shell on the machine running
    # terraform — confirm Windows is not a supported runner.
    command = "sleep 15"
  }
  triggers = {
    # Re-run the delay whenever the service account is recreated.
    # (Direct reference instead of the deprecated interpolation-only
    # "${...}" string form.)
    service_account = google_service_account.service_account.id
  }
}

# Roles required by Constellation nodes; all bindings wait for the
# eventual-consistency delay above.
resource "google_project_iam_member" "instance_admin_role" {
  project    = var.project_id
  role       = "roles/compute.instanceAdmin.v1"
  member     = "serviceAccount:${google_service_account.service_account.email}"
  depends_on = [null_resource.delay]
}

resource "google_project_iam_member" "network_admin_role" {
  project    = var.project_id
  role       = "roles/compute.networkAdmin"
  member     = "serviceAccount:${google_service_account.service_account.email}"
  depends_on = [null_resource.delay]
}

resource "google_project_iam_member" "security_admin_role" {
  project    = var.project_id
  role       = "roles/compute.securityAdmin"
  member     = "serviceAccount:${google_service_account.service_account.email}"
  depends_on = [null_resource.delay]
}

resource "google_project_iam_member" "storage_admin_role" {
  project    = var.project_id
  role       = "roles/compute.storageAdmin"
  member     = "serviceAccount:${google_service_account.service_account.email}"
  depends_on = [null_resource.delay]
}

resource "google_project_iam_member" "iam_service_account_user_role" {
  project    = var.project_id
  role       = "roles/iam.serviceAccountUser"
  member     = "serviceAccount:${google_service_account.service_account.email}"
  depends_on = [null_resource.delay]
}

# JSON key for the service account; exported base64-encoded via the
# "sa_key" output.
resource "google_service_account_key" "service_account_key" {
  service_account_id = google_service_account.service_account.name
  depends_on         = [null_resource.delay]
}
|
# Base64-encoded JSON key of the Constellation service account.
output "sa_key" {
  value     = google_service_account_key.service_account_key.private_key
  sensitive = true
}
|
# Inputs for the GCP IAM configuration.

variable "project_id" {
  type        = string
  description = "GCP Project ID"
}

variable "service_account_id" {
  type        = string
  description = "ID for the service account being created. Must match ^[a-z](?:[-a-z0-9]{4,28}[a-z0-9])$"
}

variable "region" {
  type        = string
  description = "Region used for constellation clusters. Needs to have the N2D machine type available."
}

variable "zone" {
  type        = string
  description = "Zone used for constellation clusters. Needs to be within the specified region."
}
|
@ -1,67 +0,0 @@
|
|||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hashicorp/random" {
|
||||
version = "3.5.1"
|
||||
constraints = "3.5.1"
|
||||
hashes = [
|
||||
"h1:0ULxM8/DscMzfiDWg1yclBf/39U44wQmlx745BfYZ80=",
|
||||
"h1:3hjTP5tQBspPcFAJlfafnWrNrKnr7J4Cp0qB9jbqf30=",
|
||||
"h1:6FVyQ/aG6tawPam6B+oFjgdidKd83uG9n7dOSQ66HBA=",
|
||||
"h1:6ePAACdONiMGe1j5pwUc0gpDFt82y/ka0zRimMg/geM=",
|
||||
"h1:BD3Y4CcrGHb9sx+Bl5V8M2PSyw23mykzXSwj+/6FhHA=",
|
||||
"h1:HGeb7Tajn7HZwX0MhrdyL57LoCSz5GMcI2wbHs12D4U=",
|
||||
"h1:IL9mSatmwov+e0+++YX2V6uel+dV6bn+fC/cnGDK3Ck=",
|
||||
"h1:JiENkIxSWc32/2Dtd1n4CWY3ow/PHvAeGhdgcOLpWZM=",
|
||||
"h1:MROYZuKGTuaTNf2FgbwCgSVpteQW25ubnb+Xfok2jvk=",
|
||||
"h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
|
||||
"h1:sZ7MTSD4FLekNN2wSNFGpM+5slfvpm5A/NLVZiB7CO0=",
|
||||
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
|
||||
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
|
||||
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
|
||||
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
|
||||
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
|
||||
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
|
||||
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
|
||||
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
|
||||
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
|
||||
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/terraform-provider-openstack/openstack" {
|
||||
version = "1.52.1"
|
||||
constraints = "1.52.1"
|
||||
hashes = [
|
||||
"h1:A+g4494kx7lEsJe4M16GzM9MKNPBR5tnzbZ9+33QlkM=",
|
||||
"h1:EOTtuZvB5p6DRvmHZutxA5maQUkMD7EMWTzFf2JL1HM=",
|
||||
"h1:NWz1fLVT0cItJmL8t5tK5AKXrP9EA9FIPaVYWEfhAMQ=",
|
||||
"h1:PAaUknZ3JC1blyZ0BOrIrYAKaV0KKt79SLWDwboG2To=",
|
||||
"h1:TdvatmiIUG+9RB/IIr2E/4ISw7ktF1jUCWrZe/fibaM=",
|
||||
"h1:UB2hQdQ8FA7V4jJa4q9/3sFnsXUhlLKWC8cQqX+H6ZU=",
|
||||
"h1:cuiQP8rBuyh+wAv/ItSKLu4Evro8TMaB0KXL++XB18g=",
|
||||
"h1:iWC3awIqtrlq/AAk5fCRrw7icPRrXducOXYA+1f2q6E=",
|
||||
"h1:iuuAlX04fEyvdJTWsqa3To2lRg9+7meIO3CtIasQFOI=",
|
||||
"h1:kRQXDWW2DnblI6UNmMxNf6jt+CUQ7ENGRs2Nch0aYxI=",
|
||||
"h1:sP0p4CedQh3sErEZ0QIPjaqFkLHMh/OOzUwmb+sdisI=",
|
||||
"h1:scQS826puQFDo6EY0B3Tlk0kXYtm+ru7YPyMM9GCIMI=",
|
||||
"h1:tzawotEtjBcVWnzA+wAqcbkxW7XnJCfXqod4SBts9vI=",
|
||||
"h1:yhED1rCRd7TSqnQmOUb2wiYpQP5EnhUtu3enrcf60K8=",
|
||||
"zh:037f7ab5a0942daee00d23402e7ccab472380864e13013284910fa7841a6e37c",
|
||||
"zh:52ac973e6c5cd584c5086494218e9b49d93217f5fbc34fc76fa8a9ddd635447a",
|
||||
"zh:5acad7b8c7a493fd0b659271743e2853859a4b2669df26f21aecf1b2f60fa706",
|
||||
"zh:5d9218a7f10849f2227fc11df19f78b3b11cccade6b674c314e804f0e98d4368",
|
||||
"zh:91ea6bf80ff706e734300041cf22e946c049abf8dcf1bed899f93f20f7779121",
|
||||
"zh:961d67ebf1116bd539b726ef483f7d67c95351efd09e55fbeb30cd2ca7946a12",
|
||||
"zh:9d3d8ee11cda45804e9b759064fbc9f47d6f54203bd17654236f2f601424b460",
|
||||
"zh:a0af7e5bad6114a7a0ac88cee63e2c14558572e293bebcf651ed8d8d9c20dfda",
|
||||
"zh:a1fd5609f61a43c9c2a403e024042afc3a45fde39935a388009d05105e2d39d3",
|
||||
"zh:bd84aae9f2ac6eb978837ea5994bb24be221e2e4d69a3e8842eef3fcf62594f0",
|
||||
"zh:be690e77aa497ab8bb8ed59f7e03018e96805e2e13df334086a8c5ac4290db09",
|
||||
"zh:c4ee17773e7295b0598e36148ac49b2c61caa6da3f7b02e439aa61ca6486da07",
|
||||
"zh:c871d03abf9c916584dd8fc6b63ed85bbe41208eba684b2175ac741003bf9d25",
|
||||
"zh:f1e5c4a5740ad75b9b37376db4ea0e3067b0c2b6871521bbc6a1625bef137abf",
|
||||
]
|
||||
}
|
|
terraform {
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "1.52.1"
    }

    random = {
      source  = "hashicorp/random"
      version = "3.5.1"
    }
  }
}

provider "openstack" {
  cloud = var.cloud
}

# Auth scope of the current session; used to discover the identity endpoint
# from the service catalog.
data "openstack_identity_auth_scope_v3" "scope" {
  name = "scope"
}

locals {
  uid            = random_id.uid.hex
  name           = "${var.name}-${local.uid}"
  initSecretHash = random_password.initSecret.bcrypt_hash

  # Ports opened on the nodes: the Kubernetes NodePort range plus the
  # Constellation service ports.
  ports_node_range_start = "30000"
  ports_node_range_end   = "32767"
  ports_kubernetes       = "6443"
  ports_bootstrapper     = "9000"
  ports_konnectivity     = "8132"
  ports_verify           = "30081"
  ports_recovery         = "9999"
  ports_debugd           = "4000"

  cidr_vpc_subnet_nodes = "192.168.178.0/24"
  tags                  = ["constellation-uid-${local.uid}"]

  # Public endpoint of the identity (Keystone) service, taken from the
  # session's service catalog.
  identity_service = [
    for entry in data.openstack_identity_auth_scope_v3.scope.service_catalog :
    entry if entry.type == "identity"
  ][0]
  identity_endpoint = [
    for endpoint in local.identity_service.endpoints :
    endpoint if endpoint.interface == "public"
  ][0]
  identity_internal_url = local.identity_endpoint.url
}

# Random suffix that makes all resource names unique per cluster.
resource "random_id" "uid" {
  byte_length = 4
}

# Cluster init secret; only its bcrypt hash is handed to the nodes.
resource "random_password" "initSecret" {
  length           = 32
  special          = true
  override_special = "_%@"
}
# Constellation node OS image, uploaded to (or downloaded directly by) Glance.
resource "openstack_images_image_v2" "constellation_os_image" {
  name             = local.name
  image_source_url = var.image_url
  web_download     = var.direct_download
  container_format = "bare"
  disk_format      = "raw"
  visibility       = "private"
  properties = {
    hw_firmware_type = "uefi"
    os_type          = "linux"
  }
}

# External network that provides the floating IPs.
data "openstack_networking_network_v2" "floating_ip_pool" {
  network_id = var.floating_ip_pool_id
}

resource "openstack_networking_network_v2" "vpc_network" {
  name        = local.name
  description = "Constellation VPC network"
  tags        = local.tags
}

resource "openstack_networking_subnet_v2" "vpc_subnetwork" {
  name        = local.name
  description = "Constellation VPC subnetwork"
  network_id  = openstack_networking_network_v2.vpc_network.id
  cidr        = local.cidr_vpc_subnet_nodes
  dns_nameservers = [
    "1.1.1.1",
    "8.8.8.8",
    "9.9.9.9",
  ]
  tags = local.tags
}

# Router connecting the node subnet to the external (floating IP) network.
resource "openstack_networking_router_v2" "vpc_router" {
  name                = local.name
  external_network_id = data.openstack_networking_network_v2.floating_ip_pool.network_id
}

resource "openstack_networking_router_interface_v2" "vpc_router_interface" {
  router_id = openstack_networking_router_v2.vpc_router.id
  subnet_id = openstack_networking_subnet_v2.vpc_subnetwork.id
}

# NOTE(review): openstack_compute_secgroup_v2 is the legacy compute-API
# security group; the provider recommends openstack_networking_secgroup_v2.
# Migrating would change resource addresses, so it is only flagged here.
resource "openstack_compute_secgroup_v2" "vpc_secgroup" {
  name        = local.name
  description = "Constellation VPC security group"

  # ICMP between cluster members.
  rule {
    from_port   = -1
    to_port     = -1
    ip_protocol = "icmp"
    self        = true
  }

  # All UDP and TCP traffic inside the node subnet.
  rule {
    from_port   = 1
    to_port     = 65535
    ip_protocol = "udp"
    cidr        = local.cidr_vpc_subnet_nodes
  }

  rule {
    from_port   = 1
    to_port     = 65535
    ip_protocol = "tcp"
    cidr        = local.cidr_vpc_subnet_nodes
  }

  # Kubernetes NodePort range, open to the world (TCP and UDP).
  rule {
    from_port   = local.ports_node_range_start
    to_port     = local.ports_node_range_end
    ip_protocol = "tcp"
    cidr        = "0.0.0.0/0"
  }

  rule {
    from_port   = local.ports_node_range_start
    to_port     = local.ports_node_range_end
    ip_protocol = "udp"
    cidr        = "0.0.0.0/0"
  }

  # Constellation service ports; debugd is only opened in debug mode.
  dynamic "rule" {
    for_each = flatten([
      local.ports_kubernetes,
      local.ports_bootstrapper,
      local.ports_konnectivity,
      local.ports_verify,
      local.ports_recovery,
      var.debug ? [local.ports_debugd] : [],
    ])
    content {
      from_port   = rule.value
      to_port     = rule.value
      ip_protocol = "tcp"
      cidr        = "0.0.0.0/0"
    }
  }
}
# One instance group per entry in var.node_groups (control plane and workers).
module "instance_group" {
  source                     = "./modules/instance_group"
  for_each                   = var.node_groups
  base_name                  = local.name
  node_group_name            = each.key
  role                       = each.value.role
  initial_count              = each.value.initial_count
  disk_size                  = each.value.state_disk_size
  state_disk_type            = each.value.state_disk_type
  availability_zone          = each.value.zone
  image_id                   = openstack_images_image_v2.constellation_os_image.image_id
  flavor_id                  = each.value.flavor_id
  security_groups            = [openstack_compute_secgroup_v2.vpc_secgroup.id]
  tags                       = local.tags
  uid                        = local.uid
  network_id                 = openstack_networking_network_v2.vpc_network.id
  init_secret_hash           = local.initSecretHash
  identity_internal_url      = local.identity_internal_url
  openstack_username         = var.openstack_username
  openstack_password         = var.openstack_password
  openstack_user_domain_name = var.openstack_user_domain_name
}

# Public IP for the first control-plane node (stand-in until the
# load-balancer API is available; see commented-out modules below).
resource "openstack_networking_floatingip_v2" "public_ip" {
  pool        = data.openstack_networking_network_v2.floating_ip_pool.name
  description = "Public ip for first control plane node"
  tags        = local.tags
}

resource "openstack_compute_floatingip_associate_v2" "public_ip_associate" {
  floating_ip = openstack_networking_floatingip_v2.public_ip.address
  instance_id = module.instance_group["control_plane_default"].instance_ids[0]
  # The association only works once the subnet is externally routable.
  depends_on = [
    openstack_networking_router_v2.vpc_router,
    openstack_networking_router_interface_v2.vpc_router_interface,
  ]
}

# State moves from the former per-role modules to the for_each-based module.
moved {
  from = module.instance_group_control_plane
  to   = module.instance_group["control_plane_default"]
}

moved {
  from = module.instance_group_worker
  to   = module.instance_group["worker_default"]
}
|
||||
# TODO(malt3): get LoadBalancer API enabled in the test environment
# resource "openstack_lb_loadbalancer_v2" "loadbalancer" {
#   name          = local.name
#   description   = "Constellation load balancer"
#   vip_subnet_id = openstack_networking_subnet_v2.vpc_subnetwork.id
# }

# resource "openstack_networking_floatingip_v2" "loadbalancer_ip" {
#   pool        = data.openstack_networking_network_v2.floating_ip_pool.name
#   description = "Loadbalancer ip for ${local.name}"
#   tags        = local.tags
# }

# module "loadbalancer_kube" {
#   source          = "./modules/loadbalancer"
#   name            = "${local.name}-kube"
#   member_ips      = module.instance_group["control_plane_default"].ips
#   loadbalancer_id = openstack_lb_loadbalancer_v2.loadbalancer.id
#   subnet_id       = openstack_networking_subnet_v2.vpc_subnetwork.id
#   port            = local.ports_kubernetes
# }

# module "loadbalancer_boot" {
#   source          = "./modules/loadbalancer"
#   name            = "${local.name}-boot"
#   member_ips      = module.instance_group["control_plane_default"].ips
#   loadbalancer_id = openstack_lb_loadbalancer_v2.loadbalancer.id
#   subnet_id       = openstack_networking_subnet_v2.vpc_subnetwork.id
#   port            = local.ports_bootstrapper
# }

# module "loadbalancer_verify" {
#   source          = "./modules/loadbalancer"
#   name            = "${local.name}-verify"
#   member_ips      = module.instance_group["control_plane_default"].ips
#   loadbalancer_id = openstack_lb_loadbalancer_v2.loadbalancer.id
#   subnet_id       = openstack_networking_subnet_v2.vpc_subnetwork.id
#   port            = local.ports_verify
# }

# module "loadbalancer_konnectivity" {
#   source          = "./modules/loadbalancer"
#   name            = "${local.name}-konnectivity"
#   member_ips      = module.instance_group["control_plane_default"].ips
#   loadbalancer_id = openstack_lb_loadbalancer_v2.loadbalancer.id
#   subnet_id       = openstack_networking_subnet_v2.vpc_subnetwork.id
#   port            = local.ports_konnectivity
# }

# module "loadbalancer_recovery" {
#   source          = "./modules/loadbalancer"
#   name            = "${local.name}-recovery"
#   member_ips      = module.instance_group["control_plane_default"].ips
#   loadbalancer_id = openstack_lb_loadbalancer_v2.loadbalancer.id
#   subnet_id       = openstack_networking_subnet_v2.vpc_subnetwork.id
#   port            = local.ports_recovery
# }

# module "loadbalancer_debugd" {
#   count           = var.debug ? 1 : 0 // only deploy debugd in debug mode
#   source          = "./modules/loadbalancer"
#   name            = "${local.name}-debugd"
#   member_ips      = module.instance_group["control_plane_default"].ips
#   loadbalancer_id = openstack_lb_loadbalancer_v2.loadbalancer.id
#   subnet_id       = openstack_networking_subnet_v2.vpc_subnetwork.id
#   port            = local.ports_debugd
# }
|
terraform {
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "1.52.1"
    }
  }
}

locals {
  # Role and node-group tags added to the caller-supplied tags, deduplicated.
  tags      = distinct(sort(concat(var.tags, ["constellation-role-${var.role}"], ["constellation-node-group-${var.node_group_name}"])))
  group_uid = random_id.uid.hex
  name      = "${var.base_name}-${var.role}-${local.group_uid}"
}

# Random suffix that makes the group name unique.
resource "random_id" "uid" {
  byte_length = 4
}

# TODO(malt3): get this API enabled in the test environment
# resource "openstack_compute_servergroup_v2" "instance_group" {
#   name     = local.name
#   policies = ["soft-anti-affinity"]
# }

resource "openstack_compute_instance_v2" "instance_group_member" {
  count           = var.initial_count
  name            = "${local.name}-${count.index}"
  image_id        = var.image_id
  flavor_id       = var.flavor_id
  security_groups = var.security_groups
  tags            = local.tags

  # TODO(malt3): get this API enabled in the test environment
  # scheduler_hints {
  #   group = openstack_compute_servergroup_v2.instance_group.id
  # }

  network {
    uuid = var.network_id
  }

  # Boot disk created from the OS image on local storage.
  block_device {
    uuid                  = var.image_id
    source_type           = "image"
    destination_type      = "local"
    boot_index            = 0
    delete_on_termination = true
  }

  # Blank state-disk volume attached as the second device.
  block_device {
    source_type           = "blank"
    destination_type      = "volume"
    volume_size           = var.disk_size
    volume_type           = var.state_disk_type
    boot_index            = 1
    delete_on_termination = true
  }

  # Node metadata read by the Constellation bootstrapper.
  metadata = {
    constellation-role             = var.role
    constellation-uid              = var.uid
    constellation-init-secret-hash = var.init_secret_hash
    openstack-auth-url             = var.identity_internal_url
    openstack-username             = var.openstack_username
    openstack-password             = var.openstack_password
    openstack-user-domain-name     = var.openstack_user_domain_name
  }

  availability_zone_hints = var.availability_zone
}
|
# Name of this instance group.
output "instance_group" {
  value = local.name
}

# IPv4 access addresses of all group members.
# (Modern [*] splat instead of the legacy .*. attribute splat.)
output "ips" {
  value = openstack_compute_instance_v2.instance_group_member[*].access_ip_v4
}

# Instance IDs of all group members.
output "instance_ids" {
  value = openstack_compute_instance_v2.instance_group_member[*].id
}
|
# Inputs for the OpenStack instance-group module.

variable "node_group_name" {
  type        = string
  description = "Constellation name for the node group (used for configuration and CSP-independent naming)."
}

variable "base_name" {
  type        = string
  description = "Base name of the instance group."
}

variable "uid" {
  type        = string
  description = "Unique ID of the Constellation."
}

variable "role" {
  type        = string
  description = "The role of the instance group."
  validation {
    condition     = contains(["control-plane", "worker"], var.role)
    error_message = "The role has to be 'control-plane' or 'worker'."
  }
}

variable "initial_count" {
  type        = number
  description = "Number of instances in the instance group."
}

variable "image_id" {
  type        = string
  description = "Image ID for the nodes."
}

variable "flavor_id" {
  type        = string
  description = "Flavor ID (machine type) to use for the nodes."
}

variable "security_groups" {
  type        = list(string)
  description = "Security groups to place the nodes in."
}

variable "tags" {
  type        = list(string)
  description = "Tags to attach to each node."
}

variable "disk_size" {
  type        = number
  description = "Disk size for the nodes, in GiB."
}

variable "state_disk_type" {
  type        = string
  description = "Disk/volume type to be used."
}

variable "availability_zone" {
  type        = string
  description = "The availability zone to deploy the nodes in."
}

variable "network_id" {
  type        = string
  description = "Network ID to attach each node to."
}

variable "init_secret_hash" {
  type        = string
  description = "Hash of the init secret."
}

variable "identity_internal_url" {
  type        = string
  description = "Internal URL of the Identity service."
}

# OpenStack credentials passed through to the node metadata.
variable "openstack_user_domain_name" {
  type        = string
  description = "OpenStack user domain name."
}

variable "openstack_username" {
  type        = string
  description = "OpenStack user name."
}

variable "openstack_password" {
  type        = string
  description = "OpenStack password."
}
|
terraform {
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "1.52.1"
    }
  }
}

# TCP listener for the given port on the shared load balancer.
resource "openstack_lb_listener_v2" "listener" {
  name            = var.name
  protocol        = "TCP"
  protocol_port   = var.port
  loadbalancer_id = var.loadbalancer_id
}

# Round-robin pool behind the listener.
resource "openstack_lb_pool_v2" "pool" {
  name        = var.name
  protocol    = "TCP"
  lb_method   = "ROUND_ROBIN"
  listener_id = openstack_lb_listener_v2.listener.id
}

# One pool member per backend IP.
resource "openstack_lb_member_v2" "member" {
  count         = length(var.member_ips)
  name          = format("%s-member-%02d", var.name, count.index + 1)
  address       = var.member_ips[count.index]
  protocol_port = var.port
  pool_id       = openstack_lb_pool_v2.pool.id
  subnet_id     = var.subnet_id
}

# TCP health monitor for the pool.
# NOTE(review): the resource label "k8s_api" is misleading for this generic
# module; kept unchanged to avoid altering the state address.
resource "openstack_lb_monitor_v2" "k8s_api" {
  name        = var.name
  pool_id     = openstack_lb_pool_v2.pool.id
  type        = "TCP"
  delay       = 2
  timeout     = 2
  max_retries = 2
}
|
# Inputs for the OpenStack load-balancer rule module.

variable "name" {
  type        = string
  description = "Base name of the load balancer rule."
}

variable "member_ips" {
  type        = list(string)
  description = "The IP addresses of the members of the load balancer pool."
  default     = []
}

variable "loadbalancer_id" {
  type        = string
  description = "The ID of the load balancer."
}

variable "subnet_id" {
  type        = string
  description = "The ID of the members subnet."
}

variable "port" {
  type        = number
  description = "The port on which to listen for incoming traffic."
}
|
# Without a load balancer, the public IP of the first control-plane node
# serves as both the external and the in-cluster endpoint.
output "out_of_cluster_endpoint" {
  value = openstack_networking_floatingip_v2.public_ip.address
}

output "in_cluster_endpoint" {
  value = openstack_networking_floatingip_v2.public_ip.address
}

output "api_server_cert_sans" {
  value = sort(concat([openstack_networking_floatingip_v2.public_ip.address], var.custom_endpoint == "" ? [] : [var.custom_endpoint]))
}

output "uid" {
  value = local.uid
}

output "initSecret" {
  value     = random_password.initSecret.result
  sensitive = true
}

output "name" {
  value = local.name
}

output "ip_cidr_nodes" {
  value = local.cidr_vpc_subnet_nodes
}
|
variable "node_groups" {
  type = map(object({
    role            = string
    initial_count   = number // number of instances in the node group
    flavor_id       = string // flavor (machine type) to use for instances
    state_disk_size = number // size of state disk (GiB)
    state_disk_type = string // type of state disk. Can be 'standard' or 'premium'
    zone            = string // availability zone
  }))

  validation {
    # BUG FIX: the previous condition was can([for ...]) — can() of a list of
    # booleans always succeeds, so invalid roles were never rejected.
    # alltrue() actually enforces the role of every group.
    condition     = alltrue([for group in var.node_groups : contains(["control-plane", "worker"], group.role)])
    error_message = "The role has to be 'control-plane' or 'worker'."
  }

  description = "A map of node group names to node group configurations."
}

variable "cloud" {
  type        = string
  default     = null
  description = "The cloud to use within the OpenStack \"clouds.yaml\" file. Optional. If not set, environment variables are used."
}

variable "name" {
  type        = string
  default     = "constell"
  description = "Base name of the cluster."
}

variable "image_url" {
  type        = string
  description = "The image to use for cluster nodes."
}

variable "direct_download" {
  type        = bool
  description = "If enabled, downloads OS image directly from source URL to OpenStack. Otherwise, downloads image to local machine and uploads to OpenStack."
}

variable "floating_ip_pool_id" {
  type        = string
  description = "The pool (network name) to use for floating IPs."
}

variable "openstack_user_domain_name" {
  type        = string
  description = "OpenStack user domain name."
}

variable "openstack_username" {
  type        = string
  description = "OpenStack user name."
}

variable "openstack_password" {
  type        = string
  description = "OpenStack password."
}

variable "debug" {
  type        = bool
  default     = false
  description = "Enable debug mode. This opens up a debugd port that can be used to deploy a custom bootstrapper."
}

variable "custom_endpoint" {
  type        = string
  default     = ""
  description = "Custom endpoint to use for the Kubernetes apiserver. If not set, the default endpoint will be used."
}
|
@ -1,87 +0,0 @@
|
|||
# Provider dependency lock file for the QEMU/libvirt Terraform module.
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.

provider "registry.terraform.io/dmacvicar/libvirt" {
  version     = "0.7.1"
  constraints = "0.7.1"
  hashes = [
    "h1:1yEJVPVFkRkbRY63+sFRAWau/eJ0xlecHWLCV8spkWU=",
    "h1:AJn6IML1iiq9oIUdDQTDApMvsfSKfMncF4RoKnhpNaY=",
    "h1:G114r+ESpxpMCnBxFXZZ3+HktoNK4WXAJ5M3GRwvgBQ=",
    "h1:ZG+KVAKVm++wfWnGdc8QIFn1LHRycUnmYibMg4REQyk=",
    "h1:rDm9KgxNWuhdTCJpfepeTzCB/b24bKrOMN57637RZtU=",
    "zh:1c59f2ab68da6326637ee8b03433e84af76b3e3562f251a7f2aa239a7b262a8d",
    "zh:236e24ecf036e99d9d1e2081a39dc9cb4b8993850a37141a1449f20750f883d6",
    "zh:4519c22b1f00c1d37d60ac6c2cb7ad5ab9dbcd44a80b4f61e68aacb54eae017d",
    "zh:54de4e3c979c32af1dc71ec2846912f669a28bdb0990e8a3c1fb8fea4ede7b61",
    "zh:6270a757bcf4e1f9efe47726cf0caefba30a25e59d151103cf03d1656325783c",
    "zh:68b8586d5b29c0a1cb7c608a309b38db911449c072d60eee9e40e01881f1c23a",
    "zh:724ba2290fea704714378e9363541420c36091e790c7f39150cde8987d4e0754",
    "zh:7b6860c92376cdad98273aab4bea62546622e08f50733e4b2e58a7a859d3b49d",
    "zh:986a0a4f8d9511c64bcac8010337deb43110b4c2f91969b2491fd9edc290b60e",
    "zh:aff0f6f24d69cd97a44cd6059edaf355769fbb8a7643a6db4d52c9a94f98e194",
    "zh:c46ca3f8384d06c13a7ed3d4b83c65b4f8dccbf9d5f624843b68d176add5c5c2",
    "zh:ef310534e7d38153aca4ce31655b52a6e6c4d76f32e49732c96b62e9de1ee843",
    "zh:f1566b094f4267ef2674889d874962dd41e0cba55251645e16d003c77ca8a19c",
    "zh:f2e019df7b537069828c5537c481e5b7f41d2404eef6fe5c86702c20900b303d",
  ]
}

provider "registry.terraform.io/hashicorp/random" {
  version     = "3.5.1"
  constraints = "3.5.1"
  hashes = [
    "h1:3hjTP5tQBspPcFAJlfafnWrNrKnr7J4Cp0qB9jbqf30=",
    "h1:6FVyQ/aG6tawPam6B+oFjgdidKd83uG9n7dOSQ66HBA=",
    "h1:IL9mSatmwov+e0+++YX2V6uel+dV6bn+fC/cnGDK3Ck=",
    "h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
    "h1:sZ7MTSD4FLekNN2wSNFGpM+5slfvpm5A/NLVZiB7CO0=",
    "zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
    "zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
    "zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
    "zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
    "zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
    "zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
    "zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
    "zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
    "zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
    "zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
    "zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
  ]
}

provider "registry.terraform.io/kreuzwerker/docker" {
  version     = "3.0.2"
  constraints = "3.0.2"
  hashes = [
    "h1:7JHMaq5/GEJg1g+7fc4p+/JDTAlA/gm7CQQlNTOATBc=",
    "h1:AiY0E7SIA398B4VZwZ7S9CCcZoFLmAF2v5yQCBN0TAU=",
    "h1:DcRxJArfX6EiATluWeCBW7HoD6usz9fMoTK2U3dmyPk=",
    "h1:PyVpi9KjpKXU2IlpP6paeoSeCRz59ALOilkF9bo+Xe4=",
    "h1:RK5HMr5FODIH9IxQGMClB6PS0q2QII9OP/WisO8K9CU=",
    "h1:Wb5kwP+4FYEpM+oORAsa7UJNtpSi9N6CJxxC45vlIv8=",
    "h1:XjdpVL61KtTsuPE8swok3GY8A+Bu3TZs8T2DOEpyiXo=",
    "h1:YZJY8ZVZr30bgLEmsprLE8XWBisw4zzq8G4dEIYUvHM=",
    "h1:Z1RxP35qnQ3F7W7YIehNBJVgbiKy5h8PKPeQwSB30R4=",
    "h1:cT2ccWOtlfKYBUE60/v2/4Q6Stk1KYTNnhxSck+VPlU=",
    "h1:f5QLFbvmpIkMnBnGfTXY8RtXOojo82xcE9L2xQDFC04=",
    "h1:os8pBi4rbtFJJtzNWlcGhOVsz5V9UPJvo+L0wNQFYE8=",
    "h1:tMiDR/3WQYAwE4Z7Xr1iqJN23z2GNr1ARis9yutVgjw=",
    "h1:ytKK8fxS0qwNLZUgdm7KBRoyUnDBhWAIsqIaJ/AxFZA=",
    "zh:15b0a2b2b563d8d40f62f83057d91acb02cd0096f207488d8b4298a59203d64f",
    "zh:23d919de139f7cd5ebfd2ff1b94e6d9913f0977fcfc2ca02e1573be53e269f95",
    "zh:38081b3fe317c7e9555b2aaad325ad3fa516a886d2dfa8605ae6a809c1072138",
    "zh:4a9c5065b178082f79ad8160243369c185214d874ff5048556d48d3edd03c4da",
    "zh:5438ef6afe057945f28bce43d76c4401254073de01a774760169ac1058830ac2",
    "zh:60b7fadc287166e5c9873dfe53a7976d98244979e0ab66428ea0dea1ebf33e06",
    "zh:61c5ec1cb94e4c4a4fb1e4a24576d5f39a955f09afb17dab982de62b70a9bdd1",
    "zh:a38fe9016ace5f911ab00c88e64b156ebbbbfb72a51a44da3c13d442cd214710",
    "zh:c2c4d2b1fd9ebb291c57f524b3bf9d0994ff3e815c0cd9c9bcb87166dc687005",
    "zh:d567bb8ce483ab2cf0602e07eae57027a1a53994aba470fa76095912a505533d",
    "zh:e83bf05ab6a19dd8c43547ce9a8a511f8c331a124d11ac64687c764ab9d5a792",
    "zh:e90c934b5cd65516fbcc454c89a150bfa726e7cf1fe749790c7480bbeb19d387",
    "zh:f05f167d2eaf913045d8e7b88c13757e3cf595dd5cd333057fdafc7c4b7fed62",
    "zh:fcc9c1cea5ce85e8bcb593862e699a881bd36dffd29e2e367f82d15368659c3d",
  ]
}
|
|
@ -1,127 +0,0 @@
|
|||
# QEMU/libvirt infrastructure for a local Constellation cluster:
# a NAT network, a storage pool, OS/kernel/initrd volumes, a metadata API
# container on the Docker host, and one instance_group module per node group.

terraform {
  required_providers {
    libvirt = {
      source  = "dmacvicar/libvirt"
      version = "0.7.1"
    }
    docker = {
      source  = "kreuzwerker/docker"
      version = "3.0.2"
    }
    # NOTE(review): random_password/random_id are used below but the
    # hashicorp/random provider is not pinned here — it resolves via the
    # implied "hashicorp" namespace. Consider pinning it explicitly (the
    # lock file constrains it to 3.5.1); confirm against the other modules.
  }
}

provider "libvirt" {
  uri = var.libvirt_uri
}

# Docker is used only to run the local QEMU metadata API container.
provider "docker" {
  host = "unix:///var/run/docker.sock"
}

locals {
  # /22 covering both role subnets below; handed out to nodes via DHCP.
  cidr_vpc_subnet_nodes          = "10.42.0.0/22"
  cidr_vpc_subnet_control_planes = "10.42.1.0/24"
  cidr_vpc_subnet_worker         = "10.42.2.0/24"
}

# Shared bootstrap secret; its bcrypt hash is passed to the metadata server.
resource "random_password" "initSecret" {
  length           = 32
  special          = true
  override_special = "_%@"
}

resource "docker_image" "qemu_metadata" {
  name         = var.metadata_api_image
  keep_locally = true
}

# Metadata API server: answers node metadata queries over the host network
# and talks to libvirt through the bind-mounted socket.
resource "docker_container" "qemu_metadata" {
  name         = "${var.name}-qemu-metadata"
  image        = docker_image.qemu_metadata.image_id
  network_mode = "host"
  rm           = true
  command = [
    "--network",
    "${var.name}-network",
    "--libvirt-uri",
    "${var.metadata_libvirt_uri}",
    "--initsecrethash",
    "${random_password.initSecret.bcrypt_hash}",
  ]
  mounts {
    source = abspath(var.libvirt_socket_path)
    target = "/var/run/libvirt/libvirt-sock"
    type   = "bind"
  }
}

# One VM group per entry in var.node_groups; control-plane groups get the
# control-plane subnet and (for direct-linux-boot) the kernel cmdline.
module "node_group" {
  source           = "./modules/instance_group"
  base_name        = var.name
  for_each         = var.node_groups
  node_group_name  = each.key
  role             = each.value.role
  amount           = each.value.initial_count
  state_disk_size  = each.value.disk_size
  vcpus            = each.value.vcpus
  memory           = each.value.memory
  machine          = var.machine
  cidr             = each.value.role == "control-plane" ? local.cidr_vpc_subnet_control_planes : local.cidr_vpc_subnet_worker
  network_id       = libvirt_network.constellation.id
  pool             = libvirt_pool.cluster.name
  boot_mode        = var.constellation_boot_mode
  boot_volume_id   = libvirt_volume.constellation_os_image.id
  kernel_volume_id = local.kernel_volume_id
  initrd_volume_id = local.initrd_volume_id
  kernel_cmdline   = each.value.role == "control-plane" ? local.kernel_cmdline : var.constellation_cmdline
  firmware         = var.firmware
  nvram            = var.nvram
}

# Directory-backed storage pool holding every volume of this cluster.
resource "libvirt_pool" "cluster" {
  name = "${var.name}-storage-pool"
  type = "dir"
  path = "/var/lib/libvirt/images"
}

# Base OS image; node boot disks are created as copy-on-write children.
resource "libvirt_volume" "constellation_os_image" {
  name   = "${var.name}-node-image"
  pool   = libvirt_pool.cluster.name
  source = var.constellation_os_image
  format = var.image_format
}

# Kernel/initrd volumes exist only in direct-linux-boot mode (count = 0/1).
resource "libvirt_volume" "constellation_kernel" {
  name   = "${var.name}-kernel"
  pool   = libvirt_pool.cluster.name
  source = var.constellation_kernel
  format = "raw"
  count  = var.constellation_boot_mode == "direct-linux-boot" ? 1 : 0
}

resource "libvirt_volume" "constellation_initrd" {
  name   = "${var.name}-initrd"
  pool   = libvirt_pool.cluster.name
  source = var.constellation_initrd
  format = "raw"
  count  = var.constellation_boot_mode == "direct-linux-boot" ? 1 : 0
}

# NAT network with DHCP + DNS for all nodes (covers both role subnets).
resource "libvirt_network" "constellation" {
  name      = "${var.name}-network"
  mode      = "nat"
  addresses = ["10.42.0.0/16"]
  dhcp {
    enabled = true
  }
  dns {
    enabled = true
  }
}

locals {
  # Direct-linux-boot inputs are null in UEFI mode so the module ignores them.
  kernel_volume_id = var.constellation_boot_mode == "direct-linux-boot" ? libvirt_volume.constellation_kernel[0].id : null
  initrd_volume_id = var.constellation_boot_mode == "direct-linux-boot" ? libvirt_volume.constellation_initrd[0].id : null
  kernel_cmdline   = var.constellation_boot_mode == "direct-linux-boot" ? var.constellation_cmdline : null
}
|
|
@ -1,45 +0,0 @@
|
|||
<!-- Identity transform over libvirt domain XML with targeted overrides for
     UEFI (non-TDX) guests: rewrites the firmware loader attributes, enables
     SMM, and activates all TPM PCR bank algorithms. -->
<xsl:stylesheet version="2.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
    <xsl:output omit-xml-declaration="yes" indent="yes"/>
    <!-- Default rule: copy every node and attribute unchanged. -->
    <xsl:template match="node()|@*">
        <xsl:copy>
            <xsl:apply-templates select="node()|@*"/>
        </xsl:copy>
    </xsl:template>
    <xsl:template match="os">
        <os>
            <xsl:apply-templates select="@*|node()"/>
        </os>
    </xsl:template>
    <!-- Replace the loader's attributes (keeping its text content, the
         firmware path): read-only pflash with secure="no". -->
    <xsl:template match="/domain/os/loader">
        <xsl:copy>
            <!--<xsl:apply-templates select="node()|@*"/>-->
            <xsl:attribute name="secure">
                <xsl:value-of select="'no'"/>
            </xsl:attribute>
            <xsl:attribute name="readonly">
                <xsl:value-of select="'yes'"/>
            </xsl:attribute>
            <xsl:attribute name="type">
                <xsl:value-of select="'pflash'"/>
            </xsl:attribute>
            <xsl:value-of select="."/>
        </xsl:copy>
    </xsl:template>
    <!-- Append <smm/> to the existing feature list (needed alongside q35
         machine types for secure-boot-capable firmware). -->
    <xsl:template match="/domain/features">
        <xsl:copy>
            <xsl:apply-templates select="node()|@*"/>
            <xsl:element name="smm" />
        </xsl:copy>
    </xsl:template>
    <!-- Enable sha1/sha256/sha384/sha512 PCR banks on the emulated TPM. -->
    <xsl:template match="/domain/devices/tpm/backend">
        <xsl:copy>
            <xsl:apply-templates select="node()|@*"/>
            <xsl:element name="active_pcr_banks">
                <xsl:element name="sha1"></xsl:element>
                <xsl:element name="sha256"></xsl:element>
                <xsl:element name="sha384"></xsl:element>
                <xsl:element name="sha512"></xsl:element>
            </xsl:element>
        </xsl:copy>
    </xsl:template>
</xsl:stylesheet>
|
|
@ -1,99 +0,0 @@
|
|||
# instance_group module: provisions one libvirt domain (VM) per node plus a
# copy-on-write boot disk and an empty state disk, named with a random group
# UID so multiple groups can coexist.

terraform {
  required_providers {
    libvirt = {
      source  = "dmacvicar/libvirt"
      version = "0.7.1"
    }
    random = {
      source  = "hashicorp/random"
      version = "3.5.1"
    }
  }
}
resource "libvirt_domain" "instance_group" {
  count    = var.amount
  name     = "${var.base_name}-${var.role}-${local.group_uid}-${count.index}"
  memory   = var.memory
  vcpu     = var.vcpus
  machine  = var.machine
  firmware = local.firmware # null unless boot_mode == "uefi"
  # host-passthrough CPU only for direct-linux-boot guests.
  dynamic "cpu" {
    for_each = var.boot_mode == "direct-linux-boot" ? [1] : []
    content {
      mode = "host-passthrough"
    }
  }
  # Per-VM UEFI variable store instantiated from the NVRAM template.
  # NOTE(review): the file path omits local.group_uid, so two groups with the
  # same role could collide on NVRAM files — confirm whether that can occur.
  dynamic "nvram" {
    for_each = var.boot_mode == "uefi" ? [1] : []
    content {
      file     = "/var/lib/libvirt/qemu/nvram/${var.role}-${count.index}_VARS.fd"
      template = var.nvram
    }
  }
  # Post-process the generated domain XML (secure-boot or TDX stylesheet,
  # chosen by boot mode in local.xslt_filename).
  xml {
    xslt = file("${path.module}/${local.xslt_filename}")
  }
  # Direct-linux-boot parameters; all null in UEFI mode.
  kernel  = local.kernel
  initrd  = local.initrd
  cmdline = local.cmdline
  # Emulated TPM 2.0 for measured boot.
  tpm {
    backend_type    = "emulator"
    backend_version = "2.0"
  }
  disk {
    volume_id = element(libvirt_volume.boot_volume.*.id, count.index)
  }
  disk {
    volume_id = element(libvirt_volume.state_volume.*.id, count.index)
  }
  network_interface {
    network_id = var.network_id
    hostname   = "${var.role}-${count.index}"
    # Static address inside var.cidr, offset by ip_range_start.
    addresses      = [cidrhost(var.cidr, local.ip_range_start + count.index)]
    wait_for_lease = true
  }
  console {
    type        = "pty"
    target_port = "0"
  }
}

# Copy-on-write boot disk derived from the shared OS image volume.
resource "libvirt_volume" "boot_volume" {
  count          = var.amount
  name           = "constellation-${var.role}-${local.group_uid}-${count.index}-boot"
  pool           = var.pool
  base_volume_id = var.boot_volume_id
  lifecycle {
    ignore_changes = [
      name, # required. Allow legacy scale sets to keep their old names
    ]
  }
}

# Empty per-node state disk.
resource "libvirt_volume" "state_volume" {
  count  = var.amount
  name   = "constellation-${var.role}-${local.group_uid}-${count.index}-state"
  pool   = var.pool
  size   = local.state_disk_size_byte
  format = "qcow2"
  lifecycle {
    ignore_changes = [
      name, # required. Allow legacy scale sets to keep their old names
    ]
  }
}

# Random suffix making this group's resource names unique.
resource "random_id" "uid" {
  byte_length = 4
}

locals {
  group_uid            = random_id.uid.hex
  state_disk_size_byte = 1073741824 * var.state_disk_size # GiB -> bytes
  ip_range_start       = 100                              # first host offset inside var.cidr
  # Direct-linux-boot inputs are only wired through in that mode.
  kernel  = var.boot_mode == "direct-linux-boot" ? var.kernel_volume_id : null
  initrd  = var.boot_mode == "direct-linux-boot" ? var.initrd_volume_id : null
  cmdline = var.boot_mode == "direct-linux-boot" ? [{ "_" = var.kernel_cmdline }] : null
  # Firmware only applies to UEFI boot; XSLT differs per boot mode.
  firmware      = var.boot_mode == "uefi" ? var.firmware : null
  xslt_filename = var.boot_mode == "direct-linux-boot" ? "tdx_domain.xsl" : "domain.xsl"
}
|
|
@ -1,3 +0,0 @@
|
|||
# Flat list of every IP address assigned to the group's VMs, across all
# network interfaces.
output "instance_ips" {
  value = flatten(libvirt_domain.instance_group[*].network_interface[*].addresses[*])
}
|
|
@ -1,92 +0,0 @@
|
|||
<!-- Transform for libvirt domain XML of Intel TDX guests: adds TDX launch
     security, lifecycle/power-management settings, and QEMU command-line
     overrides; strips attributes/devices TDX cannot use (loader attributes,
     default features, graphics, rng). -->
<xsl:stylesheet version="2.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
    <xsl:output omit-xml-declaration="yes" indent="yes"/>
    <!-- Default rule: copy every node and attribute unchanged. -->
    <xsl:template match="node()|@*">
        <xsl:copy>
            <xsl:apply-templates select="node()|@*"/>
        </xsl:copy>
    </xsl:template>
    <!-- Append TDX-specific elements to the <domain> root. -->
    <xsl:template match="/domain">
        <xsl:copy>
            <xsl:apply-templates select="node()|@*"/>
            <!-- UTC clock with the HPET timer disabled. -->
            <xsl:element name ="clock">
                <xsl:attribute name="offset">
                    <xsl:value-of select="'utc'"/>
                </xsl:attribute>
                <xsl:element name ="timer">
                    <xsl:attribute name="name">
                        <xsl:value-of select="'hpet'"/>
                    </xsl:attribute>
                    <xsl:attribute name="present">
                        <xsl:value-of select="'no'"/>
                    </xsl:attribute>
                </xsl:element>
            </xsl:element>
            <!-- Lifecycle: destroy on poweroff/crash, restart on reboot. -->
            <xsl:element name ="on_poweroff"><xsl:text>destroy</xsl:text></xsl:element>
            <xsl:element name ="on_reboot"><xsl:text>restart</xsl:text></xsl:element>
            <xsl:element name ="on_crash"><xsl:text>destroy</xsl:text></xsl:element>
            <!-- Disable suspend to memory and to disk. -->
            <xsl:element name ="pm">
                <xsl:element name ="suspend-to-mem">
                    <xsl:attribute name="enable">
                        <xsl:value-of select="'no'"/>
                    </xsl:attribute>
                </xsl:element>
                <xsl:element name ="suspend-to-disk">
                    <xsl:attribute name="enable">
                        <xsl:value-of select="'no'"/>
                    </xsl:attribute>
                </xsl:element>
            </xsl:element>
            <xsl:element name ="allowReboot">
                <xsl:attribute name="value">
                    <xsl:value-of select="'no'"/>
                </xsl:attribute>
            </xsl:element>
            <!-- TDX launch security: policy plus the vsock address of the
                 quote generation service used for remote attestation. -->
            <xsl:element name ="launchSecurity">
                <xsl:attribute name="type">
                    <xsl:value-of select="'tdx'"/>
                </xsl:attribute>
                <xsl:element name ="policy"><xsl:text>0x10000001</xsl:text></xsl:element>
                <xsl:element name ="Quote-Generation-Service"><xsl:text>vsock:2:4050</xsl:text></xsl:element>
            </xsl:element>
            <!-- Extra QEMU args: host CPU model without kvm-steal-time. -->
            <xsl:element name ="qemu:commandline" >
                <xsl:element name ="qemu:arg">
                    <xsl:attribute name="value">
                        <xsl:value-of select="'-cpu'"/>
                    </xsl:attribute>
                </xsl:element>
                <xsl:element name ="qemu:arg">
                    <xsl:attribute name="value">
                        <xsl:value-of select="'host,-kvm-steal-time'"/>
                    </xsl:attribute>
                </xsl:element>
            </xsl:element>
        </xsl:copy>
    </xsl:template>
    <xsl:template match="os">
        <os>
            <xsl:apply-templates select="@*|node()"/>
        </os>
    </xsl:template>
    <!-- Keep the loader path but drop its attributes (no pflash for TDX). -->
    <xsl:template match="/domain/os/loader">
        <loader>
            <xsl:apply-templates select="node()"/>
        </loader>
    </xsl:template>
    <!-- Replace the feature list wholesale with the TDX-compatible set. -->
    <xsl:template match="/domain/features">
        <features>
            <acpi/>
            <apic/>
            <ioapic driver="qemu"/>
        </features>
    </xsl:template>
    <xsl:template match="/domain/vcpu">
        <vcpu placement="static"><xsl:apply-templates select="@*|node()"/></vcpu>
    </xsl:template>
    <!-- Route the console through a virtio serial port. -->
    <xsl:template match="/domain/devices/console">
        <console type="pty">
            <target type="virtio" port="1" />
        </console>
    </xsl:template>
    <!-- Drop graphics and rng devices entirely. -->
    <xsl:template match="/domain/devices/graphics"></xsl:template>
    <xsl:template match="/domain/devices/rng"></xsl:template>
</xsl:stylesheet>
|
|
@ -1,95 +0,0 @@
|
|||
# --- Inputs of the instance_group module (one VM group of a cluster). ---

variable "amount" {
  type        = number
  description = "amount of nodes"
}

variable "vcpus" {
  type        = number
  description = "amount of vcpus per instance"
}

variable "memory" {
  type        = number
  description = "amount of memory per instance (MiB)"
}

variable "state_disk_size" {
  type        = number
  description = "size of state disk (GiB)"
}

variable "cidr" {
  type        = string
  description = "subnet to use for dhcp"
}

variable "network_id" {
  type        = string
  description = "id of the network to use"
}

variable "pool" {
  type        = string
  description = "name of the storage pool to use"
}

variable "boot_mode" {
  type        = string
  description = "boot mode. Can be 'uefi' or 'direct-linux-boot'"
  validation {
    condition     = can(regex("^(uefi|direct-linux-boot)$", var.boot_mode))
    error_message = "boot_mode must be 'uefi' or 'direct-linux-boot'"
  }
}

variable "boot_volume_id" {
  type        = string
  description = "id of the constellation boot disk"
}

# The three direct-linux-boot inputs below default to "" and are only used
# when boot_mode == "direct-linux-boot".
variable "kernel_volume_id" {
  type        = string
  description = "id of the constellation kernel volume"
  default     = ""
}

variable "initrd_volume_id" {
  type        = string
  description = "id of the constellation initrd volume"
  default     = ""
}

variable "kernel_cmdline" {
  type        = string
  description = "kernel cmdline"
  default     = ""
}

variable "role" {
  type        = string
  description = "role of the node in the constellation. either 'control-plane' or 'worker'"
}

variable "machine" {
  type        = string
  description = "machine type. use 'q35' for secure boot and 'pc' for non secure boot. See 'qemu-system-x86_64 -machine help'"
}

variable "firmware" {
  type        = string
  description = "path to UEFI firmware file. Ignored for direct-linux-boot."
}

variable "nvram" {
  type        = string
  description = "path to UEFI NVRAM template file. Used for secure boot."
}
variable "base_name" {
  type        = string
  description = "name prefix of the cluster VMs"
}

variable "node_group_name" {
  type        = string
  description = "name of the node group"
}
|
|
@ -1,52 +0,0 @@
|
|||
# NOTE(review): the endpoint outputs below index node group
# "control_plane_default" — confirm callers always define a group with
# exactly that key, otherwise these outputs fail to evaluate.
output "out_of_cluster_endpoint" {
  value = module.node_group["control_plane_default"].instance_ips[0]
}

# Identical to out_of_cluster_endpoint on QEMU (no separate internal LB).
output "in_cluster_endpoint" {
  value = module.node_group["control_plane_default"].instance_ips[0]
}

# SANs for the apiserver certificate: first control-plane IP plus the
# custom endpoint, if one is configured.
output "api_server_cert_sans" {
  value = sort(concat([module.node_group["control_plane_default"].instance_ips[0]], var.custom_endpoint == "" ? [] : [var.custom_endpoint]))
}

output "uid" {
  value = "qemu" // placeholder
}

output "initSecret" {
  value     = random_password.initSecret.result
  sensitive = true
}

# The three validate_* outputs exist only for their preconditions: they make
# `terraform plan` fail when direct-linux-boot is selected without the
# matching kernel/initrd/cmdline inputs.
output "validate_constellation_kernel" {
  value = null
  precondition {
    condition     = var.constellation_boot_mode != "direct-linux-boot" || length(var.constellation_kernel) > 0
    error_message = "constellation_kernel must be set if constellation_boot_mode is 'direct-linux-boot'"
  }
}

output "validate_constellation_initrd" {
  value = null
  precondition {
    condition     = var.constellation_boot_mode != "direct-linux-boot" || length(var.constellation_initrd) > 0
    error_message = "constellation_initrd must be set if constellation_boot_mode is 'direct-linux-boot'"
  }
}

output "validate_constellation_cmdline" {
  value = null
  precondition {
    condition     = var.constellation_boot_mode != "direct-linux-boot" || length(var.constellation_cmdline) > 0
    error_message = "constellation_cmdline must be set if constellation_boot_mode is 'direct-linux-boot'"
  }
}

output "name" {
  value = "${var.name}-qemu" // placeholder, as per "uid" output
}

# Node subnet CIDR, re-exported from locals in main.tf.
output "ip_cidr_nodes" {
  value = local.cidr_vpc_subnet_nodes
}
|
|
@ -1,104 +0,0 @@
|
|||
# Node group definitions for the QEMU cluster, keyed by group name.
variable "node_groups" {
  type = map(object({
    role          = string
    initial_count = number // number of instances in the node group
    disk_size     = number // size of state disk (GiB)
    vcpus         = number
    memory        = number // amount of memory per instance (MiB)
  }))
  validation {
    # BUG FIX: the previous condition wrapped the for-expression in can(),
    # which only tests that the expression evaluates without an error.
    # Building a list of booleans never errors, so the check could never fail
    # and invalid roles were silently accepted. alltrue() actually requires
    # every group's role to be one of the allowed values.
    condition     = alltrue([for group in var.node_groups : contains(["control-plane", "worker"], group.role)])
    error_message = "The role has to be 'control-plane' or 'worker'."
  }

  description = "A map of node group names to node group configurations."
}
|
||||
|
||||
# --- Inputs of the top-level QEMU/libvirt cluster module. ---

variable "machine" {
  type        = string
  default     = "q35"
  description = "machine type. use 'q35' for secure boot and 'pc' for non secure boot. See 'qemu-system-x86_64 -machine help'"
}

variable "libvirt_uri" {
  type        = string
  description = "libvirt socket uri"
}

variable "constellation_boot_mode" {
  type        = string
  description = "constellation boot mode. Can be 'uefi' or 'direct-linux-boot'"
  validation {
    condition = anytrue([
      var.constellation_boot_mode == "uefi",
      var.constellation_boot_mode == "direct-linux-boot",
    ])
    error_message = "constellation_boot_mode must be 'uefi' or 'direct-linux-boot'"
  }
}

variable "constellation_os_image" {
  type        = string
  description = "constellation OS file path"
}

# kernel/initrd/cmdline are only required for direct-linux-boot; the
# validate_* outputs enforce that at plan time.
variable "constellation_kernel" {
  type        = string
  description = "constellation Kernel file path"
  default     = ""
}

variable "constellation_initrd" {
  type        = string
  description = "constellation initrd file path"
  default     = ""
}

variable "constellation_cmdline" {
  type        = string
  description = "constellation kernel cmdline"
  default     = ""
}

variable "image_format" {
  type        = string
  default     = "qcow2"
  description = "image format"
}
variable "firmware" {
  type        = string
  default     = "/usr/share/OVMF/OVMF_CODE.secboot.fd"
  description = "path to UEFI firmware file. Use \"OVMF_CODE_4M.ms.fd\" on Ubuntu and \"OVMF_CODE.fd\" or \"OVMF_CODE.secboot.fd\" on Fedora."
}

variable "nvram" {
  type        = string
  description = "path to UEFI NVRAM template file. Used for secure boot."
}

variable "metadata_api_image" {
  type        = string
  description = "container image of the QEMU metadata api server"
}

variable "metadata_libvirt_uri" {
  type        = string
  description = "libvirt uri for the metadata api server"
}

variable "libvirt_socket_path" {
  type        = string
  description = "path to libvirt socket in case of unix socket"
}

variable "name" {
  type        = string
  default     = "constellation"
  description = "name prefix of the cluster VMs"
}

variable "custom_endpoint" {
  type        = string
  default     = ""
  description = "Custom endpoint to use for the Kubernetes apiserver. If not set, the default endpoint will be used."
}
|
Loading…
Add table
Add a link
Reference in a new issue