AB#2436: Initial support for create/terminate AWS NitroTPM instances

* Add .DS_Store to .gitignore

* Add AWS to config / supported instance types

* Move AWS terraform skeleton to cli/internal/terraform

* Move currently unused IAM to hack/terraform/aws

* Print supported AWS instance types when AWS dev flag is set

* Block everything aTLS related (e.g. init, verify) until AWS attestation is available

* Create/Terminate AWS dev cluster when dev flag is set

* Restrict Nitro instances to those that specifically support NitroTPM

* Pin zone for subnets

This is not great for HA, but for now we need to avoid the two subnets
ending up in different zones, causing the load balancer to not be able
to connect to the targets.

Should be replaced later with a better implementation that just uses
multiple subnets within the same region dynamically
based on # of nodes or similar.

* Add AWS/GCP to Terraform TestLoader unit test

* Add uid tag and create log group

Co-authored-by: Daniel Weiße <dw@edgeless.systems>
Co-authored-by: Malte Poll <mp@edgeless.systems>
This commit is contained in:
Nils Hanke 2022-10-21 12:24:18 +02:00 committed by GitHub
parent 07f02a442c
commit 04c4cff9f6
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
31 changed files with 940 additions and 314 deletions

View file

@ -1,179 +0,0 @@
# Terraform settings: require the official AWS provider at any 4.x release.
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.0"
}
}
}
# Configure the AWS Provider
# Region is caller-supplied via var.region (defaults in variables.tf).
provider "aws" {
region = var.region
}
# 8-byte random ID.
# NOTE(review): nothing in this file references random_id.uid — possibly
# dead, or referenced by a file outside this view. Confirm before removing.
resource "random_id" "uid" {
byte_length = 8
}
# Instance profile that lets control-plane EC2 instances assume the
# control-plane IAM role.
resource "aws_iam_instance_profile" "control_plane_instance_profile" {
name = "${var.name_prefix}_control_plane_instance_profile"
role = aws_iam_role.control_plane_role.name
}
# IAM role for control-plane nodes. The trust policy allows only the EC2
# service to assume this role (instances get it via the instance profile).
resource "aws_iam_role" "control_plane_role" {
name = "${var.name_prefix}_control_plane_role"
path = "/"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
# Permissions policy for control-plane nodes.
# The action set (autoscaling/ec2/elasticloadbalancing/iam/kms reads and
# writes) matches what a Kubernetes cloud provider integration on AWS
# typically needs — presumably for the cloud-controller-manager; confirm
# against the consuming component.
# Fix: the original listed "elasticloadbalancing:AddTags" twice; the
# duplicate entry has been removed (no permission change).
resource "aws_iam_policy" "control_plane_policy" {
  name   = "${var.name_prefix}_control_plane_policy"
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "autoscaling:DescribeAutoScalingGroups",
        "autoscaling:DescribeLaunchConfigurations",
        "autoscaling:DescribeTags",
        "ec2:DescribeInstances",
        "ec2:DescribeRegions",
        "ec2:DescribeRouteTables",
        "ec2:DescribeSecurityGroups",
        "ec2:DescribeSubnets",
        "ec2:DescribeVolumes",
        "ec2:CreateSecurityGroup",
        "ec2:CreateTags",
        "ec2:CreateVolume",
        "ec2:ModifyInstanceAttribute",
        "ec2:ModifyVolume",
        "ec2:AttachVolume",
        "ec2:AuthorizeSecurityGroupIngress",
        "ec2:CreateRoute",
        "ec2:DeleteRoute",
        "ec2:DeleteSecurityGroup",
        "ec2:DeleteVolume",
        "ec2:DetachVolume",
        "ec2:RevokeSecurityGroupIngress",
        "ec2:DescribeVpcs",
        "elasticloadbalancing:AddTags",
        "elasticloadbalancing:AttachLoadBalancerToSubnets",
        "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
        "elasticloadbalancing:CreateLoadBalancer",
        "elasticloadbalancing:CreateLoadBalancerPolicy",
        "elasticloadbalancing:CreateLoadBalancerListeners",
        "elasticloadbalancing:ConfigureHealthCheck",
        "elasticloadbalancing:DeleteLoadBalancer",
        "elasticloadbalancing:DeleteLoadBalancerListeners",
        "elasticloadbalancing:DescribeLoadBalancers",
        "elasticloadbalancing:DescribeLoadBalancerAttributes",
        "elasticloadbalancing:DetachLoadBalancerFromSubnets",
        "elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
        "elasticloadbalancing:ModifyLoadBalancerAttributes",
        "elasticloadbalancing:RegisterInstancesWithLoadBalancer",
        "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
        "elasticloadbalancing:CreateListener",
        "elasticloadbalancing:CreateTargetGroup",
        "elasticloadbalancing:DeleteListener",
        "elasticloadbalancing:DeleteTargetGroup",
        "elasticloadbalancing:DescribeListeners",
        "elasticloadbalancing:DescribeLoadBalancerPolicies",
        "elasticloadbalancing:DescribeTargetGroups",
        "elasticloadbalancing:DescribeTargetHealth",
        "elasticloadbalancing:ModifyListener",
        "elasticloadbalancing:ModifyTargetGroup",
        "elasticloadbalancing:RegisterTargets",
        "elasticloadbalancing:DeregisterTargets",
        "elasticloadbalancing:SetLoadBalancerPoliciesOfListener",
        "iam:CreateServiceLinkedRole",
        "kms:DescribeKey"
      ],
      "Resource": [
        "*"
      ]
    }
  ]
}
EOF
}
# Attach the control-plane permissions policy to the control-plane role.
resource "aws_iam_role_policy_attachment" "attach_control_plane_policy" {
role = aws_iam_role.control_plane_role.name
policy_arn = aws_iam_policy.control_plane_policy.arn
}
# Instance profile that lets worker-node EC2 instances assume the
# worker-node IAM role.
# Fix: the original attached aws_iam_role.control_plane_role here
# (copy-paste error), which would grant worker nodes the full
# control-plane permission set and leave worker_node_role unused.
resource "aws_iam_instance_profile" "worker_node_instance_profile" {
  name = "${var.name_prefix}_worker_node_instance_profile"
  role = aws_iam_role.worker_node_role.name
}
# IAM role for worker nodes. Same EC2-only trust policy as the
# control-plane role; permissions are attached separately below.
resource "aws_iam_role" "worker_node_role" {
name = "${var.name_prefix}_worker_node_role"
path = "/"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
# Permissions policy for worker nodes: EC2 describe calls plus read-only
# ECR access (pulling container images). Much narrower than the
# control-plane policy by design.
resource "aws_iam_policy" "worker_node_policy" {
name = "${var.name_prefix}_worker_node_policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeInstances",
"ec2:DescribeRegions",
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": "*"
}
]
}
EOF
}
# Attach the worker-node permissions policy to the worker-node role.
resource "aws_iam_role_policy_attachment" "attach_worker_node_policy" {
role = aws_iam_role.worker_node_role.name
policy_arn = aws_iam_policy.worker_node_policy.arn
}

View file

@ -1,7 +0,0 @@
# Expose the instance profile names so the cluster module can pass them
# to its instance groups.
output "control_plane_instance_profile" {
value = aws_iam_instance_profile.control_plane_instance_profile.name
}
output "worker_nodes_instance_profile" {
value = aws_iam_instance_profile.worker_node_instance_profile.name
}

View file

@ -1,10 +0,0 @@
# Inputs for the IAM module.
# name_prefix namespaces every IAM resource created here.
variable "name_prefix" {
type = string
description = "Prefix for all resources"
}
variable "region" {
type = string
description = "AWS region"
default = "us-east-2"
}

View file

@ -1,189 +0,0 @@
# Terraform settings for the cluster root module: AWS provider (4.x) and
# the random provider (pinned exactly) used for the cluster UID below.
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.0"
}
random = {
source = "hashicorp/random"
version = "3.4.3"
}
}
}
# Configure the AWS Provider
# Region is caller-supplied via var.region.
provider "aws" {
region = var.region
}
# Shared naming and port constants.
# - uid/name/tag: a per-cluster random suffix keeps resources from
#   colliding across clusters in the same account.
# - ports_*: service ports referenced by the security group and the
#   per-service load balancer modules below.
# NOTE(review): ports_ssh is defined but not referenced anywhere in this
# file — no SSH ingress rule exists. Confirm whether it is intentional
# dead config or a missing rule.
locals {
uid = random_id.uid.hex
name = "${var.name}-${local.uid}"
tag = "constellation-${local.uid}"
ports_node_range = "30000-32767"
ports_ssh = "22"
ports_kubernetes = "6443"
ports_bootstrapper = "9000"
ports_konnectivity = "8132"
ports_verify = "30081"
ports_debugd = "4000"
cidr_vpc_subnet_nodes = "192.168.178.0/24"
}
# 4-byte (8 hex chars) cluster UID, consumed via local.uid above.
resource "random_id" "uid" {
byte_length = 4
}
# VPC for the whole cluster; the node subnet below carves
# 192.168.178.0/24 out of this /16.
resource "aws_vpc" "vpc" {
cidr_block = "192.168.0.0/16"
tags = {
Name = "${local.name}-vpc"
}
}
# Single subnet hosting all nodes and load balancers.
# NOTE(review): no availability_zone is pinned here, so AWS picks one —
# if more subnets are ever added they can land in different zones and
# break LB-to-target connectivity. Confirm whether the zone should be
# pinned explicitly.
resource "aws_subnet" "main" {
vpc_id = aws_vpc.vpc.id
cidr_block = local.cidr_vpc_subnet_nodes
tags = {
Name = "${local.name}-subnet"
}
}
# Internet gateway for the VPC.
# NOTE(review): no aws_route_table/association routing 0.0.0.0/0 through
# this gateway is visible in this file — confirm it exists elsewhere,
# otherwise instances have no internet egress despite the gateway.
resource "aws_internet_gateway" "gw" {
vpc_id = aws_vpc.vpc.id
tags = {
Name = "${local.name}-gateway"
}
}
# Security group shared by all nodes: allow all egress, and ingress from
# anywhere (0.0.0.0/0) on the cluster service ports. Note the verify port
# (30081) needs no dedicated rule — it falls inside the node port range.
resource "aws_security_group" "security_group" {
name = local.name
vpc_id = aws_vpc.vpc.id
description = "Security group for ${local.name}"
# Unrestricted outbound traffic.
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
description = "Allow all outbound traffic"
}
# Kubernetes NodePort range (30000-32767), split into from/to bounds.
ingress {
from_port = split("-", local.ports_node_range)[0]
to_port = split("-", local.ports_node_range)[1]
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "K8s node ports"
}
ingress {
from_port = local.ports_bootstrapper
to_port = local.ports_bootstrapper
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "bootstrapper"
}
ingress {
from_port = local.ports_kubernetes
to_port = local.ports_kubernetes
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "kubernetes"
}
ingress {
from_port = local.ports_konnectivity
to_port = local.ports_konnectivity
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "konnectivity"
}
ingress {
from_port = local.ports_debugd
to_port = local.ports_debugd
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "debugd"
}
}
# NLB forwarding the bootstrapper port (9000) to the control plane.
module "load_balancer_bootstrapper" {
source = "./modules/load_balancer"
name = "${local.name}-bootstrapper"
vpc = aws_vpc.vpc.id
subnet = aws_subnet.main.id
port = local.ports_bootstrapper
}
# NLB forwarding the Kubernetes API port (6443) to the control plane.
module "load_balancer_kubernetes" {
source = "./modules/load_balancer"
name = "${local.name}-kubernetes"
vpc = aws_vpc.vpc.id
subnet = aws_subnet.main.id
port = local.ports_kubernetes
}
# NLB forwarding the verify port (30081) to the control plane.
module "load_balancer_verify" {
source = "./modules/load_balancer"
name = "${local.name}-verify"
vpc = aws_vpc.vpc.id
subnet = aws_subnet.main.id
port = local.ports_verify
}
# NLB forwarding the debugd port (4000) to the control plane.
module "load_balancer_debugd" {
source = "./modules/load_balancer"
name = "${local.name}-debugd"
vpc = aws_vpc.vpc.id
subnet = aws_subnet.main.id
port = local.ports_debugd
}
# NLB for the konnectivity port (8132).
# NOTE(review): unlike the other LBs, this module's target_group_arn is
# not included in any instance group's target_group_arns in this file,
# so the listener has no registered targets — confirm this is intended.
module "load_balancer_konnectivity" {
source = "./modules/load_balancer"
name = "${local.name}-konnectivity"
vpc = aws_vpc.vpc.id
subnet = aws_subnet.main.id
port = local.ports_konnectivity
}
# Autoscaling group of control-plane nodes, registered as targets of the
# per-service load balancers.
# Fix: the konnectivity target group was created by its load_balancer
# module but registered with no instance group, leaving that NLB with no
# targets; control-plane nodes are now registered with it as well (the
# security group already permits konnectivity ingress on 8132).
module "instance_group_control_plane" {
  source         = "./modules/instance_group"
  name           = local.name
  role           = "control-plane"
  uid            = local.uid
  instance_type  = var.instance_type
  instance_count = var.count_control_plane
  image_id       = var.ami
  disk_size      = var.disk_size
  target_group_arns = [
    module.load_balancer_bootstrapper.target_group_arn,
    module.load_balancer_kubernetes.target_group_arn,
    module.load_balancer_verify.target_group_arn,
    module.load_balancer_debugd.target_group_arn,
    module.load_balancer_konnectivity.target_group_arn
  ]
  subnetwork           = aws_subnet.main.id
  iam_instance_profile = var.control_plane_iam_instance_profile
}
# Autoscaling group of worker nodes. Not registered with any load
# balancer (empty target_group_arns) — only control-plane nodes front
# the cluster services.
module "instance_group_worker_nodes" {
source = "./modules/instance_group"
name = local.name
role = "worker"
uid = local.uid
instance_type = var.instance_type
instance_count = var.count_worker_nodes
image_id = var.ami
disk_size = var.disk_size
subnetwork = aws_subnet.main.id
target_group_arns = []
iam_instance_profile = var.worker_nodes_iam_instance_profile
}

View file

@ -1,58 +0,0 @@
# Terraform settings for the instance_group module: AWS provider 4.x.
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.0"
}
}
}
# Per-group name, e.g. "<cluster>-control-plane" / "<cluster>-worker".
locals {
name = "${var.name}-${lower(var.role)}"
}
# Launch configuration for the group's instances.
# NOTE(review): the resource label says "control_plane" but this module
# is instantiated for workers too — the label is misleading; renaming it
# would move state addresses, so it is left as-is here.
# Fix: var.disk_size was declared and passed by every caller but never
# used, so the root volume silently kept the AMI default size; it now
# sets the root block device size (in GB).
resource "aws_launch_configuration" "control_plane_launch_config" {
  name_prefix          = local.name
  image_id             = var.image_id
  instance_type        = var.instance_type
  iam_instance_profile = var.iam_instance_profile
  root_block_device {
    volume_size = var.disk_size
  }
  # Require IMDSv2 session tokens for instance metadata access.
  metadata_options {
    http_tokens = "required"
  }
  # Launch configurations are immutable; create the replacement before
  # destroying the old one so the ASG always has a valid config.
  lifecycle {
    create_before_destroy = true
  }
}
# Autoscaling group running the instance group (1..10 instances, desired
# count from the caller). Targets are registered via target_group_arns.
# NOTE(review): as with the launch configuration, the "control_plane"
# resource label is also used for worker groups — misleading but renaming
# would move Terraform state addresses.
resource "aws_autoscaling_group" "control_plane_autoscaling_group" {
name = local.name
launch_configuration = aws_launch_configuration.control_plane_launch_config.name
min_size = 1
max_size = 10
desired_capacity = var.instance_count
vpc_zone_identifier = [var.subnetwork]
target_group_arns = var.target_group_arns
lifecycle {
create_before_destroy = true
}
# Tags propagate to launched instances so they can be identified by
# name, role, and cluster UID.
tag {
key = "Name"
value = local.name
propagate_at_launch = true
}
tag {
key = "constellation-role"
value = var.role
propagate_at_launch = true
}
tag {
key = "constellation-uid"
value = var.uid
propagate_at_launch = true
}
}

View file

@ -1,49 +0,0 @@
# Inputs for the instance_group module.
variable "name" {
  type        = string
  description = "Base name of the instance group."
}
# Fix: the description previously demanded 'ControlPlane' or 'Worker',
# but the root module actually passes "control-plane" and "worker"
# (and local.name lowercases the value); documented accordingly.
variable "role" {
  type        = string
  description = "The role of the instance group. Has to be 'control-plane' or 'worker'."
}
variable "uid" {
  type        = string
  description = "UID of the cluster. This is used for tags."
}
variable "instance_type" {
  type        = string
  description = "Instance type for the nodes."
}
variable "instance_count" {
  type        = number
  description = "Number of instances in the instance group."
}
variable "image_id" {
  type        = string
  description = "Image ID for the nodes."
}
variable "disk_size" {
  type        = number
  description = "Disk size for the nodes, in GB."
}
# Fix: description was singular although the variable is a list.
variable "target_group_arns" {
  type        = list(string)
  description = "ARNs of the target groups to register the instances with."
}
variable "subnetwork" {
  type        = string
  description = "Name of the subnetwork to use."
}
variable "iam_instance_profile" {
  type        = string
  description = "IAM instance profile for the nodes."
}

View file

@ -1,48 +0,0 @@
# Terraform settings for the load_balancer module: AWS provider 4.x.
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.0"
}
}
}
# Internet-facing network load balancer for a single service port.
# Fix: the Name tag was the static string "loadbalancer" for every
# instantiation, making the five LBs indistinguishable in the console;
# it now uses the per-service var.name (same value as the LB's own name).
resource "aws_lb" "front_end" {
  name               = var.name
  internal           = false
  load_balancer_type = "network"
  subnets            = [var.subnet]
  tags = {
    Name = var.name
  }
  enable_cross_zone_load_balancing = true
}
# TCP target group; health check probes the same service port.
resource "aws_lb_target_group" "front_end" {
name = var.name
port = var.port
protocol = "TCP"
vpc_id = var.vpc
health_check {
port = var.port
protocol = "TCP"
}
# Target groups attached to an LB cannot be destroyed in place; create
# the replacement first.
lifecycle {
create_before_destroy = true
}
}
# TCP listener forwarding the service port straight to the target group.
resource "aws_lb_listener" "front_end" {
load_balancer_arn = aws_lb.front_end.arn
port = var.port
protocol = "TCP"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.front_end.arn
}
}

View file

@ -1,3 +0,0 @@
# ARN of the target group, so callers can register instance groups.
output "target_group_arn" {
value = aws_lb_target_group.front_end.arn
}

View file

@ -1,19 +0,0 @@
# Inputs for the load_balancer module.
variable "name" {
  type        = string
  description = "Name of the load balancer."
}
# NOTE(review): declared as string although it carries a port number;
# Terraform coerces it where a number is expected. Kept as string to
# preserve the module interface.
variable "port" {
  type        = string
  description = "Port the load balancer listens on and forwards to."
}
variable "vpc" {
  type        = string
  description = "ID of the VPC."
}
# Fix: description said "subnets" (plural) although a single subnet ID
# is expected.
variable "subnet" {
  type        = string
  description = "ID of the subnet to deploy the load balancer into."
}

View file

@ -1,50 +0,0 @@
# Inputs for the cluster root module.
variable "name" {
  type        = string
  description = "Name of your Constellation"
}
variable "worker_nodes_iam_instance_profile" {
  type        = string
  description = "Name of the IAM instance profile for worker nodes"
}
variable "control_plane_iam_instance_profile" {
  type        = string
  description = "Name of the IAM instance profile for control plane nodes"
}
# Fix: description said "worker nodes" only, but this type is passed to
# both the control-plane and worker instance groups in main.tf.
# NOTE(review): the t2.micro default predates the NitroTPM work — confirm
# it is still an acceptable default for the intended instances.
variable "instance_type" {
  type        = string
  description = "Instance type for cluster nodes (control plane and workers)"
  default     = "t2.micro"
}
variable "disk_size" {
  type        = number
  description = "Disk size for nodes [GB]"
  default     = 30
}
variable "count_control_plane" {
  type        = number
  description = "Number of control plane nodes"
  default     = 1
}
variable "count_worker_nodes" {
  type        = number
  description = "Number of worker nodes"
  default     = 1
}
# NOTE(review): AMI IDs are region-specific; this default is only valid
# in the default region below.
variable "ami" {
  type        = string
  description = "AMI ID"
  default     = "ami-02f3416038bdb17fb" // Ubuntu 22.04 LTS
}
variable "region" {
  type        = string
  description = "AWS region"
  default     = "us-east-2"
}