From 63e263c6a5e85f269158f3c4ab29630c71d63366 Mon Sep 17 00:00:00 2001 From: Leonard Cohnen Date: Tue, 16 Jul 2024 13:27:42 +0200 Subject: [PATCH] k8s: use separate lb for K8s services on azure --- internal/constellation/helm/overrides.go | 2 +- terraform/infrastructure/azure/main.tf | 79 ++++++++++++++++--- .../azure/modules/scale_set/main.tf | 1 + 3 files changed, 68 insertions(+), 14 deletions(-) diff --git a/internal/constellation/helm/overrides.go b/internal/constellation/helm/overrides.go index deb515909..fdadaac88 100644 --- a/internal/constellation/helm/overrides.go +++ b/internal/constellation/helm/overrides.go @@ -243,7 +243,7 @@ func getCCMConfig(azureState state.Azure, serviceAccURI string) ([]byte, error) ResourceGroup: azureState.ResourceGroup, LoadBalancerSku: "standard", SecurityGroupName: azureState.NetworkSecurityGroupName, - LoadBalancerName: azureState.LoadBalancerName, + LoadBalancerName: "kubernetes-lb", UseInstanceMetadata: true, VMType: "vmss", Location: creds.Location, diff --git a/terraform/infrastructure/azure/main.tf b/terraform/infrastructure/azure/main.tf index 7338f86ba..b1e060142 100644 --- a/terraform/infrastructure/azure/main.tf +++ b/terraform/infrastructure/azure/main.tf @@ -33,7 +33,6 @@ locals { { name = "kubernetes", port = "6443", health_check_protocol = "Https", path = "/readyz", priority = 100 }, { name = "bootstrapper", port = "9000", health_check_protocol = "Tcp", path = null, priority = 101 }, { name = "verify", port = "30081", health_check_protocol = "Tcp", path = null, priority = 102 }, - { name = "konnectivity", port = "8132", health_check_protocol = "Tcp", path = null, priority = 103 }, { name = "recovery", port = "9999", health_check_protocol = "Tcp", path = null, priority = 104 }, { name = "join", port = "30090", health_check_protocol = "Tcp", path = null, priority = 105 }, var.debug ? 
[{ name = "debugd", port = "4000", health_check_protocol = "Tcp", path = null, priority = 106 }] : [], @@ -214,10 +213,13 @@ resource "azurerm_network_security_group" "security_group" { tags = local.tags dynamic "security_rule" { - for_each = concat( - local.ports, - [{ name = "nodeports", port = local.ports_node_range, priority = 200 }] - ) + # we keep this rule for one last release since the azurerm provider does not + # support moving security rules that are inlined (like this) to the external resource one. + # Even worse, just defining the azurerm_network_security_group without the + # "security_rule" block will NOT remove all the rules but do nothing. + # TODO(@3u13r): remove the "security_rule" block in the next release after this code has landed. + # So either after 2.18 or after 2.17.X if cherry-picked release. + for_each = [{ name = "konnectivity", priority = 1000, port = 8132 }] content { name = security_rule.value.name priority = security_rule.value.priority @@ -232,6 +234,28 @@ resource "azurerm_network_security_group" "security_group" { } } +resource "azurerm_network_security_rule" "nsg_rule" { + for_each = { + for o in concat( + local.ports, + [{ name = "nodeports", port = local.ports_node_range, priority = 200 }] + ) + : o.name => o + } + + name = each.value.name + priority = each.value.priority + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = each.value.port + source_address_prefix = "*" + destination_address_prefix = "*" + resource_group_name = var.resource_group + network_security_group_name = azurerm_network_security_group.security_group.name +} + module "scale_set_group" { source = "./modules/scale_set" for_each = var.node_groups @@ -257,14 +281,8 @@ module "scale_set_group" { image_id = var.image_id network_security_group_id = azurerm_network_security_group.security_group.id subnet_id = azurerm_subnet.node_subnet.id - backend_address_pool_ids = each.value.role == "control-plane" ? 
[ - azurerm_lb_backend_address_pool.all.id, - module.loadbalancer_backend_control_plane.backendpool_id - ] : [ - azurerm_lb_backend_address_pool.all.id, - module.loadbalancer_backend_worker.backendpool_id - ] - marketplace_image = var.marketplace_image + backend_address_pool_ids = each.value.role == "control-plane" ? [module.loadbalancer_backend_control_plane.backendpool_id] : [] + marketplace_image = var.marketplace_image } module "jump_host" { @@ -286,3 +304,38 @@ data "azurerm_user_assigned_identity" "uaid" { name = local.uai_name resource_group_name = local.uai_resource_group } + +moved { + to = azurerm_network_security_rule.nsg_rule["nodeports"] + from = azurerm_network_security_group.security_group.security_rule["nodeports"] +} + +moved { + to = azurerm_network_security_rule.nsg_rule["kubernetes"] + from = azurerm_network_security_group.security_group.security_rule["kubernetes"] +} + +moved { + to = azurerm_network_security_rule.nsg_rule["bootstrapper"] + from = azurerm_network_security_group.security_group.security_rule["bootstrapper"] +} + +moved { + to = azurerm_network_security_rule.nsg_rule["verify"] + from = azurerm_network_security_group.security_group.security_rule["verify"] +} + +moved { + to = azurerm_network_security_rule.nsg_rule["recovery"] + from = azurerm_network_security_group.security_group.security_rule["recovery"] +} + +moved { + to = azurerm_network_security_rule.nsg_rule["join"] + from = azurerm_network_security_group.security_group.security_rule["join"] +} + +moved { + to = azurerm_network_security_rule.nsg_rule["debugd"] + from = azurerm_network_security_group.security_group.security_rule["debugd"] +} diff --git a/terraform/infrastructure/azure/modules/scale_set/main.tf b/terraform/infrastructure/azure/modules/scale_set/main.tf index 9bd3d6cb9..ffc5b8117 100644 --- a/terraform/infrastructure/azure/modules/scale_set/main.tf +++ b/terraform/infrastructure/azure/modules/scale_set/main.tf @@ -119,6 +119,7 @@ resource 
"azurerm_linux_virtual_machine_scale_set" "scale_set" { instances, # required. autoscaling modifies the instance count externally source_image_id, # required. update procedure modifies the image id externally source_image_reference, # required. update procedure modifies the image reference externally + network_interface[0].ip_configuration[0].load_balancer_backend_address_pool_ids ] } }