mirror of
https://github.com/edgelesssys/constellation.git
synced 2024-12-28 00:49:26 -05:00
bootstrapper: refactor coredns and cilium setup (#2129)
* Decouple CoreDNS installation from Cilium * Align cilium helm installation with other charts * Remove unused functions --------- Signed-off-by: Daniel Weiße <dw@edgeless.systems>
This commit is contained in:
parent
39cea48741
commit
7152633255
@ -4,7 +4,6 @@ load("//bazel/go:go_test.bzl", "go_test")
|
||||
go_library(
|
||||
name = "kubernetes",
|
||||
srcs = [
|
||||
"ciliuminstallation.go",
|
||||
"cloud_provider.go",
|
||||
"k8sutil.go",
|
||||
"kubernetes.go",
|
||||
|
@ -1,81 +0,0 @@
|
||||
/*
|
||||
Copyright (c) Edgeless Systems GmbH
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only
|
||||
*/
|
||||
|
||||
// Package kubernetes provides functionality to bootstrap a Kubernetes cluster, or join an existing one.
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
|
||||
"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/k8sapi"
|
||||
"github.com/edgelesssys/constellation/v2/internal/constants"
|
||||
"github.com/edgelesssys/constellation/v2/internal/deploy/helm"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// installCilium sets up the cilium pod network.
|
||||
func installCilium(ctx context.Context, helmInstaller helmClient, kubectl k8sapi.Client, release helm.Release, in k8sapi.SetupPodNetworkInput) error {
|
||||
timeoutS := int64(10)
|
||||
// allow coredns to run on uninitialized nodes (required by cloud-controller-manager)
|
||||
tolerations := []corev1.Toleration{
|
||||
{
|
||||
Key: "node.cloudprovider.kubernetes.io/uninitialized",
|
||||
Value: "true",
|
||||
Effect: corev1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "node.kubernetes.io/unreachable",
|
||||
Operator: corev1.TolerationOpExists,
|
||||
Effect: corev1.TaintEffectNoExecute,
|
||||
TolerationSeconds: &timeoutS,
|
||||
},
|
||||
}
|
||||
if err := kubectl.AddTolerationsToDeployment(ctx, tolerations, "coredns", "kube-system"); err != nil {
|
||||
return fmt.Errorf("failed to add tolerations to coredns deployment: %w", err)
|
||||
}
|
||||
if err := kubectl.EnforceCoreDNSSpread(ctx); err != nil {
|
||||
return fmt.Errorf("failed to enforce CoreDNS spread: %w", err)
|
||||
}
|
||||
|
||||
switch in.CloudProvider {
|
||||
case "aws", "azure", "openstack", "qemu":
|
||||
return installCiliumGeneric(ctx, helmInstaller, release, in.LoadBalancerHost, in.LoadBalancerPort)
|
||||
case "gcp":
|
||||
return installCiliumGCP(ctx, helmInstaller, release, in.NodeName, in.FirstNodePodCIDR, in.SubnetworkPodCIDR, in.LoadBalancerHost, in.LoadBalancerPort)
|
||||
default:
|
||||
return fmt.Errorf("unsupported cloud provider %q", in.CloudProvider)
|
||||
}
|
||||
}
|
||||
|
||||
// installCiliumGeneric installs cilium with the given load balancer endpoint.
|
||||
// This is used for cloud providers that do not require special server-side configuration.
|
||||
// Currently this is AWS, Azure, and QEMU.
|
||||
func installCiliumGeneric(ctx context.Context, helmInstaller helmClient, release helm.Release, kubeAPIHost, kubeAPIPort string) error {
|
||||
if release.Values != nil {
|
||||
release.Values["k8sServiceHost"] = kubeAPIHost
|
||||
release.Values["k8sServicePort"] = kubeAPIPort
|
||||
}
|
||||
return helmInstaller.InstallChart(ctx, release)
|
||||
}
|
||||
|
||||
func installCiliumGCP(ctx context.Context, helmInstaller helmClient, release helm.Release, nodeName, nodePodCIDR, subnetworkPodCIDR, kubeAPIHost, kubeAPIPort string) error {
|
||||
out, err := exec.CommandContext(ctx, constants.KubectlPath, "--kubeconfig", constants.ControlPlaneAdminConfFilename, "patch", "node", nodeName, "-p", "{\"spec\":{\"podCIDR\": \""+nodePodCIDR+"\"}}").CombinedOutput()
|
||||
if err != nil {
|
||||
err = errors.New(string(out))
|
||||
return err
|
||||
}
|
||||
|
||||
// configure pod network CIDR
|
||||
release.Values["ipv4NativeRoutingCIDR"] = subnetworkPodCIDR
|
||||
release.Values["strictModeCIDR"] = subnetworkPodCIDR
|
||||
release.Values["k8sServiceHost"] = kubeAPIHost
|
||||
release.Values["k8sServicePort"] = kubeAPIPort
|
||||
|
||||
return helmInstaller.InstallChart(ctx, release)
|
||||
}
|
@ -50,7 +50,6 @@ const (
|
||||
type Client interface {
|
||||
Initialize(kubeconfig []byte) error
|
||||
CreateConfigMap(ctx context.Context, configMap corev1.ConfigMap) error
|
||||
AddTolerationsToDeployment(ctx context.Context, tolerations []corev1.Toleration, name string, namespace string) error
|
||||
AddNodeSelectorsToDeployment(ctx context.Context, selectors map[string]string, name string, namespace string) error
|
||||
ListAllNamespaces(ctx context.Context) (*corev1.NamespaceList, error)
|
||||
AnnotateNode(ctx context.Context, nodeName, annotationKey, annotationValue string) error
|
||||
|
@ -14,6 +14,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
@ -162,6 +163,10 @@ func (k *KubeWrapper) InitCluster(
|
||||
return nil, fmt.Errorf("waiting for Kubernetes API to be available: %w", err)
|
||||
}
|
||||
|
||||
if err := k.client.EnforceCoreDNSSpread(ctx); err != nil {
|
||||
return nil, fmt.Errorf("configuring CoreDNS deployment: %w", err)
|
||||
}
|
||||
|
||||
// Setup the K8s components ConfigMap.
|
||||
k8sComponentsConfigMap, err := k.setupK8sComponentsConfigMap(ctx, kubernetesComponents, versionString)
|
||||
if err != nil {
|
||||
@ -192,8 +197,12 @@ func (k *KubeWrapper) InitCluster(
|
||||
}
|
||||
|
||||
log.Infof("Installing Cilium")
|
||||
if err = installCilium(ctx, k.helmClient, k.client, helmReleases.Cilium, setupPodNetworkInput); err != nil {
|
||||
return nil, fmt.Errorf("installing pod network: %w", err)
|
||||
ciliumVals, err := k.setupCiliumVals(ctx, setupPodNetworkInput)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting up cilium vals: %w", err)
|
||||
}
|
||||
if err := k.helmClient.InstallChartWithValues(ctx, helmReleases.Cilium, ciliumVals); err != nil {
|
||||
return nil, fmt.Errorf("installing cilium pod network: %w", err)
|
||||
}
|
||||
|
||||
log.Infof("Waiting for Cilium to become healthy")
|
||||
@ -552,6 +561,31 @@ func (k *KubeWrapper) setupOperatorVals(ctx context.Context) (map[string]any, er
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (k *KubeWrapper) setupCiliumVals(ctx context.Context, in k8sapi.SetupPodNetworkInput) (map[string]any, error) {
|
||||
vals := map[string]any{
|
||||
"k8sServiceHost": in.LoadBalancerHost,
|
||||
"k8sServicePort": in.LoadBalancerPort,
|
||||
}
|
||||
|
||||
// GCP requires extra configuration for Cilium
|
||||
if cloudprovider.FromString(k.cloudProvider) == cloudprovider.GCP {
|
||||
if out, err := exec.CommandContext(
|
||||
ctx, constants.KubectlPath,
|
||||
"--kubeconfig", constants.ControlPlaneAdminConfFilename,
|
||||
"patch", "node", in.NodeName, "-p", "{\"spec\":{\"podCIDR\": \""+in.FirstNodePodCIDR+"\"}}",
|
||||
).CombinedOutput(); err != nil {
|
||||
err = errors.New(string(out))
|
||||
return nil, fmt.Errorf("%s: %w", out, err)
|
||||
}
|
||||
|
||||
vals["ipv4NativeRoutingCIDR"] = in.SubnetworkPodCIDR
|
||||
vals["strictModeCIDR"] = in.SubnetworkPodCIDR
|
||||
|
||||
}
|
||||
|
||||
return vals, nil
|
||||
}
|
||||
|
||||
type ccmConfigGetter interface {
|
||||
GetCCMConfig(ctx context.Context, providerID, cloudServiceAccountURI string) ([]byte, error)
|
||||
}
|
||||
|
@ -516,7 +516,6 @@ func (s *stubConfigProvider) JoinConfiguration(_ bool) k8sapi.KubeadmJoinYAML {
|
||||
|
||||
type stubKubectl struct {
|
||||
createConfigMapErr error
|
||||
addTolerationsToDeploymentErr error
|
||||
addTNodeSelectorsToDeploymentErr error
|
||||
waitForCRDsErr error
|
||||
listAllNamespacesErr error
|
||||
@ -534,10 +533,6 @@ func (s *stubKubectl) CreateConfigMap(_ context.Context, _ corev1.ConfigMap) err
|
||||
return s.createConfigMapErr
|
||||
}
|
||||
|
||||
func (s *stubKubectl) AddTolerationsToDeployment(_ context.Context, _ []corev1.Toleration, _, _ string) error {
|
||||
return s.addTolerationsToDeploymentErr
|
||||
}
|
||||
|
||||
func (s *stubKubectl) AddNodeSelectorsToDeployment(_ context.Context, _ map[string]string, _, _ string) error {
|
||||
return s.addTNodeSelectorsToDeploymentErr
|
||||
}
|
||||
@ -563,10 +558,6 @@ type stubHelmClient struct {
|
||||
installChartError error
|
||||
}
|
||||
|
||||
// InstallCilium returns the stub's preconfigured ciliumError; all arguments are ignored.
func (s *stubHelmClient) InstallCilium(_ context.Context, _ k8sapi.Client, _ helm.Release, _ k8sapi.SetupPodNetworkInput) error {
	return s.ciliumError
}
|
||||
|
||||
// InstallChart delegates to InstallChartWithValues using the release's own values,
// so both code paths share the stub's error behavior.
func (s *stubHelmClient) InstallChart(ctx context.Context, release helm.Release) error {
	return s.InstallChartWithValues(ctx, release, release.Values)
}
|
||||
|
@ -144,39 +144,34 @@ func (k *Kubectl) GetNodes(ctx context.Context) ([]corev1.Node, error) {
|
||||
return nodes.Items, nil
|
||||
}
|
||||
|
||||
// AddTolerationsToDeployment adds [K8s tolerations] to the deployment, identified
|
||||
// by name and namespace.
|
||||
//
|
||||
// [K8s tolerations]: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
|
||||
func (k *Kubectl) AddTolerationsToDeployment(ctx context.Context, tolerations []corev1.Toleration, name string, namespace string) error {
|
||||
deployments := k.AppsV1().Deployments(namespace)
|
||||
// EnforceCoreDNSSpread adds a pod anti-affinity to the CoreDNS deployment to ensure that
|
||||
// CoreDNS pods are spread across nodes.
|
||||
func (k *Kubectl) EnforceCoreDNSSpread(ctx context.Context) error {
|
||||
// allow CoreDNS Pods to run on uninitialized nodes, which is required by cloud-controller-manager
|
||||
tolerationSeconds := int64(10)
|
||||
tolerations := []corev1.Toleration{
|
||||
{
|
||||
Key: "node.cloudprovider.kubernetes.io/uninitialized",
|
||||
Value: "true",
|
||||
Effect: corev1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "node.kubernetes.io/unreachable",
|
||||
Operator: corev1.TolerationOpExists,
|
||||
Effect: corev1.TaintEffectNoExecute,
|
||||
TolerationSeconds: &tolerationSeconds,
|
||||
},
|
||||
}
|
||||
|
||||
deployments := k.AppsV1().Deployments("kube-system")
|
||||
// retry resource update if an error occurs
|
||||
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
result, err := deployments.Get(ctx, name, metav1.GetOptions{})
|
||||
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
result, err := deployments.Get(ctx, "coredns", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get Deployment to add toleration: %w", err)
|
||||
}
|
||||
|
||||
result.Spec.Template.Spec.Tolerations = append(result.Spec.Template.Spec.Tolerations, tolerations...)
|
||||
if _, err = deployments.Update(ctx, result, metav1.UpdateOptions{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// EnforceCoreDNSSpread adds a pod anti-affinity to the coredns deployment to ensure that
|
||||
// coredns pods are spread across nodes.
|
||||
func (k *Kubectl) EnforceCoreDNSSpread(ctx context.Context) error {
|
||||
deployments := k.AppsV1().Deployments("kube-system")
|
||||
// retry resource update if an error occurs
|
||||
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
result, err := deployments.Get(ctx, "coredns", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get Deployment to add toleration: %w", err)
|
||||
}
|
||||
|
||||
if result.Spec.Template.Spec.Affinity == nil {
|
||||
result.Spec.Template.Spec.Affinity = &corev1.Affinity{}
|
||||
@ -206,10 +201,6 @@ func (k *Kubectl) EnforceCoreDNSSpread(ctx context.Context) error {
|
||||
_, err = deployments.Update(ctx, result, metav1.UpdateOptions{})
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddNodeSelectorsToDeployment adds [K8s selectors] to the deployment, identified
|
||||
|
Loading…
Reference in New Issue
Block a user