terraform: openstack node groups (#1966)

* openstack

* rename to base_name

* fix openstack boot vtpm

* add docs for accessing bootstrapper logs

* rename to initial count
Adrian Stobbe 2023-07-03 16:33:00 +02:00 committed by GitHub
parent d43242a55f
commit c39df2f7da
9 changed files with 141 additions and 125 deletions


@@ -171,7 +171,7 @@ func main() {
metadata, helmClient, &kubewaiter.CloudKubeAPIWaiter{},
)
metadataAPI = metadata
openDevice = vtpm.OpenVTPM
fs = afero.NewOsFs()
default:
clusterInitJoiner = &clusterFake{}


@@ -327,23 +327,32 @@ func (c *Creator) createOpenStack(ctx context.Context, cl terraformClient, opts
}
vars := terraform.OpenStackClusterVariables{
CommonVariables: terraform.CommonVariables{
Name: opts.Config.Name,
CountControlPlanes: opts.ControlPlaneCount,
CountWorkers: opts.WorkerCount,
StateDiskSizeGB: opts.Config.StateDiskSizeGB,
},
Cloud: opts.Config.Provider.OpenStack.Cloud,
AvailabilityZone: opts.Config.Provider.OpenStack.AvailabilityZone,
Name: opts.Config.Name,
Cloud: toPtr(opts.Config.Provider.OpenStack.Cloud),
FlavorID: opts.Config.Provider.OpenStack.FlavorID,
FloatingIPPoolID: opts.Config.Provider.OpenStack.FloatingIPPoolID,
StateDiskType: opts.Config.Provider.OpenStack.StateDiskType,
ImageURL: opts.image,
DirectDownload: *opts.Config.Provider.OpenStack.DirectDownload,
OpenstackUserDomainName: opts.Config.Provider.OpenStack.UserDomainName,
OpenstackUsername: opts.Config.Provider.OpenStack.Username,
OpenstackPassword: opts.Config.Provider.OpenStack.Password,
Debug: opts.Config.IsDebugCluster(),
NodeGroups: map[string]terraform.OpenStackNodeGroup{
"control_plane_default": {
Role: role.ControlPlane.TFString(),
InitialCount: opts.ControlPlaneCount,
Zone: opts.Config.Provider.OpenStack.AvailabilityZone, // TODO(elchead): make configurable AB#3225
StateDiskType: opts.Config.Provider.OpenStack.StateDiskType,
StateDiskSizeGB: opts.Config.StateDiskSizeGB,
},
"worker_default": {
Role: role.Worker.TFString(),
InitialCount: opts.WorkerCount,
Zone: opts.Config.Provider.OpenStack.AvailabilityZone, // TODO(elchead): make configurable AB#3225
StateDiskType: opts.Config.Provider.OpenStack.StateDiskType,
StateDiskSizeGB: opts.Config.StateDiskSizeGB,
},
},
}
if err := cl.PrepareWorkspace(path.Join("terraform", strings.ToLower(cloudprovider.OpenStack.String())), &vars); err != nil {


@@ -158,40 +158,22 @@ resource "openstack_compute_secgroup_v2" "vpc_secgroup" {
}
}
module "instance_group_control_plane" {
source = "./modules/instance_group"
name = local.name
role = "control-plane"
initial_count = var.control_plane_count
image_id = openstack_images_image_v2.constellation_os_image.image_id
flavor_id = var.flavor_id
security_groups = [openstack_compute_secgroup_v2.vpc_secgroup.id]
tags = local.tags
uid = local.uid
disk_size = var.state_disk_size
state_disk_type = var.state_disk_type
availability_zone = var.availability_zone
network_id = openstack_networking_network_v2.vpc_network.id
init_secret_hash = local.initSecretHash
identity_internal_url = local.identity_internal_url
openstack_username = var.openstack_username
openstack_password = var.openstack_password
openstack_user_domain_name = var.openstack_user_domain_name
}
module "instance_group" {
module "instance_group_worker" {
source = "./modules/instance_group"
name = local.name
role = "worker"
initial_count = var.worker_count
for_each = var.node_groups
base_name = local.name
node_group_name = each.key
role = each.value.role
initial_count = each.value.initial_count
disk_size = each.value.state_disk_size
state_disk_type = each.value.state_disk_type
availability_zone = each.value.zone
image_id = openstack_images_image_v2.constellation_os_image.image_id
flavor_id = var.flavor_id
security_groups = [openstack_compute_secgroup_v2.vpc_secgroup.id]
tags = local.tags
uid = local.uid
security_groups = [openstack_compute_secgroup_v2.vpc_secgroup.id]
disk_size = var.state_disk_size
state_disk_type = var.state_disk_type
availability_zone = var.availability_zone
network_id = openstack_networking_network_v2.vpc_network.id
init_secret_hash = local.initSecretHash
identity_internal_url = local.identity_internal_url
@@ -209,15 +191,24 @@ resource "openstack_networking_floatingip_v2" "public_ip" {
resource "openstack_compute_floatingip_associate_v2" "public_ip_associate" {
floating_ip = openstack_networking_floatingip_v2.public_ip.address
instance_id = module.instance_group_control_plane.instance_ids.0
instance_id = module.instance_group["control_plane_default"].instance_ids.0
depends_on = [
openstack_networking_router_v2.vpc_router,
openstack_networking_router_interface_v2.vpc_router_interface,
]
}
# TODO(malt3): get LoadBalancer API enabled in the test environment
moved {
from = module.instance_group_control_plane
to = module.instance_group["control_plane_default"]
}
moved {
from = module.instance_group_worker
to = module.instance_group["worker_default"]
}
# TODO(malt3): get LoadBalancer API enabled in the test environment
# resource "openstack_lb_loadbalancer_v2" "loadbalancer" {
# name = local.name
# description = "Constellation load balancer"


@@ -8,8 +8,14 @@ terraform {
}
locals {
name = "${var.name}-${var.role}"
tags = distinct(sort(concat(var.tags, ["constellation-role-${var.role}"])))
tags = distinct(sort(concat(var.tags, ["constellation-role-${var.role}"], ["constellation-node-group-${var.node_group_name}"])))
group_uid = random_id.uid.hex
#name = "${var.base_name}-${var.role}" // TODO keep old naming ?
name = "${var.base_name}-${var.role}-${local.group_uid}"
}
resource "random_id" "uid" {
byte_length = 4
}
# TODO(malt3): get this API enabled in the test environment
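
The group-scoped locals above give every instance group a name of the form `<base_name>-<role>-<group_uid>`, where the suffix comes from a 4-byte `random_id`. A small Go sketch of how that naming scheme plays out (base name and role are illustrative values, not taken from a real cluster):

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

func main() {
	// random_id.uid with byte_length = 4 produces 4 random bytes;
	// its .hex attribute renders them as 8 hex characters.
	uid := make([]byte, 4)
	if _, err := rand.Read(uid); err != nil {
		panic(err)
	}
	// Mirrors: name = "${var.base_name}-${var.role}-${local.group_uid}"
	name := fmt.Sprintf("%s-%s-%s", "constell", "worker", hex.EncodeToString(uid))
	fmt.Println(name) // e.g. constell-worker-1a2b3c4d
}
```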


@@ -1,4 +1,9 @@
variable "name" {
variable "node_group_name" {
type = string
description = "Constellation name for the node group (used for configuration and CSP-independent naming)."
}
variable "base_name" {
type = string
description = "Base name of the instance group."
}


@@ -1,3 +1,20 @@
variable "node_groups" {
type = map(object({
role = string
initial_count = number // number of instances in the node group
state_disk_size = number // size of state disk (GiB)
state_disk_type = string // type of state disk. Can be 'standard' or 'premium'
zone = string // availability zone
}))
validation {
condition = can([for group in var.node_groups : group.role == "control-plane" || group.role == "worker"])
error_message = "The role has to be 'control-plane' or 'worker'."
}
description = "A map of node group names to node group configurations."
}
variable "cloud" {
type = string
default = null
@@ -10,32 +27,6 @@ variable "name" {
description = "Base name of the cluster."
}
variable "control_plane_count" {
type = number
description = "The number of control plane nodes to deploy."
}
variable "worker_count" {
type = number
description = "The number of worker nodes to deploy."
}
variable "state_disk_size" {
type = number
default = 30
description = "The size of the state disk in GB."
}
variable "state_disk_type" {
type = string
description = "Disk/volume type to be used."
}
variable "availability_zone" {
type = string
description = "The availability zone to deploy the nodes in."
}
variable "image_url" {
type = string
description = "The image to use for cluster nodes."


@@ -229,52 +229,49 @@ func (v *AzureIAMVariables) String() string {
// OpenStackClusterVariables is user configuration for creating a cluster with Terraform on OpenStack.
type OpenStackClusterVariables struct {
// CommonVariables contains common variables.
CommonVariables
// Name of the cluster.
Name string `hcl:"name" cty:"name"`
// NodeGroups is a map of node groups to create.
NodeGroups map[string]OpenStackNodeGroup `hcl:"node_groups" cty:"node_groups"`
// Cloud is the (optional) name of the OpenStack cloud to use when reading the "clouds.yaml" configuration file. If empty, environment variables are used.
Cloud string
// AvailabilityZone is the OpenStack availability zone to use.
AvailabilityZone string
Cloud *string `hcl:"cloud" cty:"cloud"`
// Flavor is the ID of the OpenStack flavor (machine type) to use.
FlavorID string
FlavorID string `hcl:"flavor_id" cty:"flavor_id"`
// FloatingIPPoolID is the ID of the OpenStack floating IP pool to use for public IPs.
FloatingIPPoolID string
// StateDiskType is the OpenStack disk type to use for the state disk.
StateDiskType string
FloatingIPPoolID string `hcl:"floating_ip_pool_id" cty:"floating_ip_pool_id"`
// ImageURL is the URL of the OpenStack image to use.
ImageURL string
ImageURL string `hcl:"image_url" cty:"image_url"`
// DirectDownload decides whether to download the image directly from the URL to OpenStack or to upload it from the local machine.
DirectDownload bool
DirectDownload bool `hcl:"direct_download" cty:"direct_download"`
// OpenstackUserDomainName is the OpenStack user domain name to use.
OpenstackUserDomainName string
OpenstackUserDomainName string `hcl:"openstack_user_domain_name" cty:"openstack_user_domain_name"`
// OpenstackUsername is the OpenStack user name to use.
OpenstackUsername string
OpenstackUsername string `hcl:"openstack_username" cty:"openstack_username"`
// OpenstackPassword is the OpenStack password to use.
OpenstackPassword string
OpenstackPassword string `hcl:"openstack_password" cty:"openstack_password"`
// Debug is true if debug mode is enabled.
Debug bool
Debug bool `hcl:"debug" cty:"debug"`
}
// OpenStackNodeGroup is a node group to create on OpenStack.
type OpenStackNodeGroup struct {
// Role is the role of the node group.
Role string `hcl:"role" cty:"role"`
// InitialCount is the number of instances to create.
InitialCount int `hcl:"initial_count" cty:"initial_count"`
// Zone is the OpenStack availability zone to use.
Zone string `hcl:"zone" cty:"zone"`
// StateDiskType is the OpenStack disk type to use for the state disk.
StateDiskType string `hcl:"state_disk_type" cty:"state_disk_type"`
// StateDiskSizeGB is the size of the state disk to allocate to each node, in GB.
StateDiskSizeGB int `hcl:"state_disk_size" cty:"state_disk_size"`
}
// String returns a string representation of the variables, formatted as Terraform variables.
func (v *OpenStackClusterVariables) String() string {
b := &strings.Builder{}
b.WriteString(v.CommonVariables.String())
if v.Cloud != "" {
writeLinef(b, "cloud = %q", v.Cloud)
}
writeLinef(b, "availability_zone = %q", v.AvailabilityZone)
writeLinef(b, "flavor_id = %q", v.FlavorID)
writeLinef(b, "floating_ip_pool_id = %q", v.FloatingIPPoolID)
writeLinef(b, "image_url = %q", v.ImageURL)
writeLinef(b, "direct_download = %t", v.DirectDownload)
writeLinef(b, "state_disk_type = %q", v.StateDiskType)
writeLinef(b, "openstack_user_domain_name = %q", v.OpenstackUserDomainName)
writeLinef(b, "openstack_username = %q", v.OpenstackUsername)
writeLinef(b, "openstack_password = %q", v.OpenstackPassword)
writeLinef(b, "debug = %t", v.Debug)
return b.String()
f := hclwrite.NewEmptyFile()
gohcl.EncodeIntoBody(v, f.Body())
return string(f.Bytes())
}
// TODO(malt3): Add support for OpenStack IAM variables.
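
The rewritten `String` method above no longer assembles the Terraform variables file by hand with `writeLinef`; it encodes the tagged struct through `gohcl`/`hclwrite`. A minimal, self-contained sketch of that encoding pattern, using illustrative stand-in types rather than the real `OpenStackClusterVariables`:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

// nodeGroup mirrors the shape of OpenStackNodeGroup; the cty tags let
// gohcl convert the map values into an HCL object.
type nodeGroup struct {
	Role         string `hcl:"role" cty:"role"`
	InitialCount int    `hcl:"initial_count" cty:"initial_count"`
}

// clusterVars is a trimmed-down stand-in for OpenStackClusterVariables.
type clusterVars struct {
	Name       string               `hcl:"name" cty:"name"`
	NodeGroups map[string]nodeGroup `hcl:"node_groups" cty:"node_groups"`
}

func main() {
	v := clusterVars{
		Name: "cluster-name",
		NodeGroups: map[string]nodeGroup{
			"worker_default": {Role: "worker", InitialCount: 2},
		},
	}
	f := hclwrite.NewEmptyFile()
	gohcl.EncodeIntoBody(&v, f.Body())
	fmt.Print(string(f.Bytes())) // prints name and node_groups as HCL attributes
}
```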


@@ -229,41 +229,47 @@ resource_group_name = "my-resource-group"
func TestOpenStackClusterVariables(t *testing.T) {
vars := OpenStackClusterVariables{
CommonVariables: CommonVariables{
Name: "cluster-name",
CountControlPlanes: 1,
CountWorkers: 2,
StateDiskSizeGB: 30,
},
Cloud: "my-cloud",
AvailabilityZone: "az-01",
Name: "cluster-name",
Cloud: toPtr("my-cloud"),
FlavorID: "flavor-0123456789abcdef",
FloatingIPPoolID: "fip-pool-0123456789abcdef",
StateDiskType: "performance-8",
ImageURL: "https://example.com/image.raw",
DirectDownload: true,
OpenstackUserDomainName: "my-user-domain",
OpenstackUsername: "my-username",
OpenstackPassword: "my-password",
Debug: true,
NodeGroups: map[string]OpenStackNodeGroup{
"control_plane_default": {
Role: "control-plane",
InitialCount: 1,
Zone: "az-01",
StateDiskType: "performance-8",
StateDiskSizeGB: 30,
},
},
}
// test that the variables are correctly rendered
want := `name = "cluster-name"
control_plane_count = 1
worker_count = 2
state_disk_size = 30
cloud = "my-cloud"
availability_zone = "az-01"
flavor_id = "flavor-0123456789abcdef"
floating_ip_pool_id = "fip-pool-0123456789abcdef"
image_url = "https://example.com/image.raw"
direct_download = true
state_disk_type = "performance-8"
node_groups = {
control_plane_default = {
initial_count = 1
role = "control-plane"
state_disk_size = 30
state_disk_type = "performance-8"
zone = "az-01"
}
}
cloud = "my-cloud"
flavor_id = "flavor-0123456789abcdef"
floating_ip_pool_id = "fip-pool-0123456789abcdef"
image_url = "https://example.com/image.raw"
direct_download = true
openstack_user_domain_name = "my-user-domain"
openstack_username = "my-username"
openstack_password = "my-password"
debug = true
openstack_username = "my-username"
openstack_password = "my-password"
debug = true
`
got := vars.String()
assert.Equal(t, want, got)


@@ -1,6 +1,9 @@
# Creating a Debug cluster
# Debug cluster
A debug cluster allows quicker iteration cycles during development by letting you upload new bootstrapper binaries through the `cdbg` tool.
Furthermore, a debug cluster allows you to access the bootstrapper logs through the cloud provider's serial console.
## Creating a debug cluster
After building (see [here](./build-develop-deploy.md#build)), you can find all CLIs and binaries in the `build` directory.
@@ -44,3 +47,11 @@ Finally run:
```sh
./constellation init
```
## Access bootstrapper logs
Once logged in to the control-plane machine, execute:
```sh
journalctl -fu constellation-bootstrapper
```