helm: manage CoreDNS addon as Helm chart (#3236)

* helm: generate CoreDNS Helm chart
* helm: load CoreDNS Helm chart
* bootstrapper: don't install coredns addon
Markus Rudy 2024-07-03 19:38:55 +02:00 committed by Markus Rudy
parent 807bbbfd16
commit 97c77e2a78
22 changed files with 689 additions and 69 deletions
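Context: the chart added below is a plain Helm v3 chart, so it can be loaded and installed with the stock Helm Go SDK. A minimal sketch of that flow — illustrative only; the names and wiring here are assumptions, not this repo's actual code, which embeds the chart in the CLI and derives clusterIP from the service CIDR as shown in the diffs below:

```go
package main

import (
	"log"
	"os"

	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/chart/loader"
	"helm.sh/helm/v3/pkg/cli"
)

func main() {
	// Load the generated chart from disk (the repo embeds it instead).
	chrt, err := loader.Load("charts/coredns")
	if err != nil {
		log.Fatalf("loading chart: %v", err)
	}

	// Set up a Helm action configuration against the current kubeconfig.
	settings := cli.New()
	cfg := new(action.Configuration)
	if err := cfg.Init(settings.RESTClientGetter(), "kube-system", os.Getenv("HELM_DRIVER"), log.Printf); err != nil {
		log.Fatalf("initializing helm: %v", err)
	}

	install := action.NewInstall(cfg)
	install.ReleaseName = "coredns"
	install.Namespace = "kube-system"

	// Values merged over the chart's values.yaml; the real code derives
	// clusterIP from the cluster's service CIDR (see extraCoreDNSValues below).
	vals := map[string]any{"clusterIP": "10.96.0.10"}
	if _, err := install.Run(chrt, vals); err != nil {
		log.Fatalf("installing coredns: %v", err)
	}
}
```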

View file

@@ -465,6 +465,14 @@ go_library(
"charts/cert-manager/templates/cainjector-config.yaml",
"charts/cert-manager/templates/extras-objects.yaml",
"charts/cert-manager/templates/podmonitor.yaml",
"charts/coredns/Chart.yaml",
"charts/coredns/values.yaml",
"charts/coredns/templates/clusterrole.yaml",
"charts/coredns/templates/clusterrolebinding.yaml",
"charts/coredns/templates/configmap.yaml",
"charts/coredns/templates/deployment.yaml",
"charts/coredns/templates/service.yaml",
"charts/coredns/templates/serviceaccount.yaml",
],
importpath = "github.com/edgelesssys/constellation/v2/internal/constellation/helm",
visibility = ["//:__subpackages__"],
@@ -492,6 +500,7 @@ go_library(
"@io_k8s_client_go//restmapper",
"@io_k8s_client_go//tools/clientcmd",
"@io_k8s_client_go//util/retry",
"@io_k8s_kubernetes//cmd/kubeadm/app/constants",
"@sh_helm_helm_v3//pkg/action",
"@sh_helm_helm_v3//pkg/chart",
"@sh_helm_helm_v3//pkg/chart/loader",

View file

@@ -0,0 +1,3 @@
apiVersion: v2
name: kube-dns
version: 0.0.0

View file

@@ -0,0 +1,23 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch

View file

@@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system

View file

@@ -0,0 +1,28 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes {{ .Values.dnsDomain }} in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
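With the chart's default values.yaml (dnsDomain: cluster.local, shown further down), the templated kubernetes stanza renders to:

```
kubernetes cluster.local in-addr.arpa ip6.arpa {
    pods insecure
    fallthrough in-addr.arpa ip6.arpa
    ttl 30
}
```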

View file

@@ -0,0 +1,109 @@
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
k8s-app: kube-dns
name: coredns
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
k8s-app: kube-dns
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- kube-dns
topologyKey: kubernetes.io/hostname
weight: 100
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: '{{ .Values.image }}'
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
readOnly: true
dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 10
volumes:
- configMap:
items:
- key: Corefile
path: Corefile
name: coredns
name: config-volume
status: {}

View file

@@ -0,0 +1,33 @@
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
# Without this resourceVersion value, an update of the Service between versions will yield:
# Service "kube-dns" is invalid: metadata.resourceVersion: Invalid value: "": must be specified for an update
resourceVersion: "0"
spec:
clusterIP: "{{ .Values.clusterIP }}"
ports:
- name: dns
port: 53
protocol: UDP
targetPort: 53
- name: dns-tcp
port: 53
protocol: TCP
targetPort: 53
- name: metrics
port: 9153
protocol: TCP
targetPort: 9153
selector:
k8s-app: kube-dns

View file

@@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system

View file

@@ -0,0 +1,3 @@
clusterIP: 10.96.0.10
dnsDomain: cluster.local
image: registry.k8s.io/coredns/coredns:v1.11.1@sha256:1eeb4c7316bacb1d4c8ead65571cd92dd21e27359f0d4917f1a5822a73b75db1

View file

@@ -0,0 +1,26 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
go_library(
name = "corednsgen_lib",
srcs = ["corednsgen.go"],
importpath = "github.com/edgelesssys/constellation/v2/internal/constellation/helm/corednsgen",
visibility = ["//visibility:private"],
deps = [
"//internal/versions",
"@com_github_regclient_regclient//:regclient",
"@com_github_regclient_regclient//types/ref",
"@io_k8s_api//apps/v1:apps",
"@io_k8s_api//core/v1:core",
"@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm",
"@io_k8s_kubernetes//cmd/kubeadm/app/images",
"@io_k8s_kubernetes//cmd/kubeadm/app/phases/addons/dns",
"@io_k8s_kubernetes//cmd/kubeadm/app/util",
"@io_k8s_sigs_yaml//:yaml",
],
)

go_binary(
name = "corednsgen",
embed = [":corednsgen_lib"],
visibility = ["//:__subpackages__"],
)

View file

@@ -0,0 +1,153 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/

// corednsgen synthesizes a Helm chart from the resource templates embedded in
// kubeadm and writes it to the `charts` directory underneath the current
// working directory. This removes the existing `coredns` subdirectory!
package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"path/filepath"
"github.com/edgelesssys/constellation/v2/internal/versions"
"github.com/regclient/regclient"
"github.com/regclient/regclient/types/ref"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/images"
kubedns "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"sigs.k8s.io/yaml"
)

var chartDir = flag.String("charts", "./charts", "target directory to create charts in")

func main() {
flag.Parse()
if err := os.RemoveAll(filepath.Join(*chartDir, "coredns")); err != nil {
log.Fatalf("Could not remove chart dir: %v", err)
}
writeFileRelativeToChartDir(chartYAML(), "Chart.yaml")
writeFileRelativeToChartDir(valuesYAML(), "values.yaml")
writeTemplate(kubedns.CoreDNSServiceAccount, "serviceaccount.yaml")
writeTemplate(kubedns.CoreDNSClusterRole, "clusterrole.yaml")
writeTemplate(kubedns.CoreDNSClusterRoleBinding, "clusterrolebinding.yaml")
writeTemplate(kubedns.CoreDNSConfigMap, "configmap.yaml")
writeTemplate(kubedns.CoreDNSService, "service.yaml")
writeFileRelativeToChartDir(patchedDeployment(), "templates", "deployment.yaml")
}

func chartYAML() []byte {
chart := map[string]string{
"apiVersion": "v2",
"name": "kube-dns",
"version": "0.0.0",
}
data, err := yaml.Marshal(chart)
if err != nil {
log.Fatalf("Could not marshal Chart.yaml: %v", err)
}
return data
}

func valuesYAML() []byte {
cfg := &kubeadm.ClusterConfiguration{
KubernetesVersion: string(versions.Default),
ImageRepository: "registry.k8s.io",
}
img := images.GetDNSImage(cfg)
ref, err := ref.New(img)
if err != nil {
log.Fatalf("Could not parse image reference: %v", err)
}
rc := regclient.New()
m, err := rc.ManifestGet(context.Background(), ref)
if err != nil {
log.Fatalf("Could not get image manifest: %v", err)
}
values := map[string]string{
"clusterIP": "10.96.0.10",
"dnsDomain": "cluster.local",
"image": fmt.Sprintf("%s/%s:%s@%s", ref.Registry, ref.Repository, ref.Tag, m.GetDescriptor().Digest.String()),
}
data, err := yaml.Marshal(values)
if err != nil {
log.Fatalf("Could not marshal values.yaml: %v", err)
}
return data
}

// patchedDeployment extracts the CoreDNS deployment from kubeadm and adds necessary tolerations.
func patchedDeployment() []byte {
var d appsv1.Deployment
if err := yaml.Unmarshal(parseTemplate(kubedns.CoreDNSDeployment), &d); err != nil {
log.Fatalf("Could not parse deployment: %v", err)
}
tolerations := []corev1.Toleration{
{Key: "node.cloudprovider.kubernetes.io/uninitialized", Value: "true", Effect: corev1.TaintEffectNoSchedule},
{Key: "node.kubernetes.io/unreachable", Operator: corev1.TolerationOpExists, Effect: corev1.TaintEffectNoExecute, TolerationSeconds: toPtr(int64(10))},
}
d.Spec.Template.Spec.Tolerations = append(d.Spec.Template.Spec.Tolerations, tolerations...)
out, err := yaml.Marshal(d)
if err != nil {
log.Fatalf("Could not marshal patched deployment: %v", err)
}
return out
}

func writeFileRelativeToChartDir(content []byte, pathElements ...string) {
p := filepath.Join(append([]string{*chartDir, "coredns"}, pathElements...)...)
d := filepath.Dir(p)
if err := os.MkdirAll(d, 0o755); err != nil {
log.Fatalf("Could not create dir %q: %v", d, err)
}
if err := os.WriteFile(p, content, 0o644); err != nil {
log.Fatalf("Could not write file %q: %v", p, err)
}
}

// parseTemplate replaces the Go template placeholders in kubeadm resources
// with fixed values or Helm value placeholders.
func parseTemplate(tmpl string) []byte {
vars := struct {
DeploymentName, Image, ControlPlaneTaintKey, DNSDomain, DNSIP string
Replicas *int32
}{
DeploymentName: "coredns",
DNSDomain: `{{ .Values.dnsDomain }}`,
DNSIP: `"{{ .Values.clusterIP }}"`,
Image: `"{{ .Values.image }}"`,
ControlPlaneTaintKey: "node-role.kubernetes.io/control-plane",
Replicas: toPtr(int32(2)),
}
data, err := kubeadmutil.ParseTemplate(tmpl, vars)
if err != nil {
log.Fatalf("Could not interpolate template: %v", err)
}
return data
}

func writeTemplate(tmpl string, name string) {
data := parseTemplate(tmpl)
writeFileRelativeToChartDir(data, "templates", name)
}

func toPtr[T any](v T) *T {
return &v
}
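For reference, the generator's output matches the chart files registered in the BUILD file above:

```
charts/coredns/
├── Chart.yaml
├── values.yaml
└── templates/
    ├── clusterrole.yaml
    ├── clusterrolebinding.yaml
    ├── configmap.yaml
    ├── deployment.yaml
    ├── service.yaml
    └── serviceaccount.yaml
```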

View file

@@ -91,6 +91,7 @@ type Options struct {
HelmWaitMode WaitMode
ApplyTimeout time.Duration
OpenStackValues *OpenStackValues
ServiceCIDR string
}

// PrepareApply loads the charts and returns the executor to apply them.
@@ -114,7 +115,8 @@ func (h Client) loadReleases(
) ([]release, error) {
helmLoader := newLoader(flags.CSP, flags.AttestationVariant, flags.K8sVersion, stateFile, h.cliVersion)
h.log.Debug("Created new Helm loader")
return helmLoader.loadReleases(flags.Conformance, flags.DeployCSIDriver, flags.HelmWaitMode, secret, serviceAccURI, flags.OpenStackValues)
// TODO(burgerdev): pass down the entire flags struct
return helmLoader.loadReleases(flags.Conformance, flags.DeployCSIDriver, flags.HelmWaitMode, secret, serviceAccURI, flags.OpenStackValues, flags.ServiceCIDR)
}
// Applier runs the Helm actions.

View file

@@ -199,6 +199,7 @@ func TestHelmApply(t *testing.T) {
certManagerVersion = *tc.clusterCertManagerVersion
}
helmListVersion(lister, "cilium", "v1.15.5-edg.1")
helmListVersion(lister, "coredns", "v0.0.0")
helmListVersion(lister, "cert-manager", certManagerVersion)
helmListVersion(lister, "constellation-services", tc.clusterMicroServiceVersion)
helmListVersion(lister, "constellation-operators", tc.clusterMicroServiceVersion)

View file

@@ -31,6 +31,7 @@ import (
// Run `go generate` to download (and patch) upstream helm charts.
//go:generate ./generateCilium.sh
//go:generate go run ./corednsgen/
//go:generate ./update-csi-charts.sh
//go:generate ./generateCertManager.sh
//go:generate ./update-aws-load-balancer-chart.sh
@@ -46,6 +47,7 @@ type chartInfo struct {
var (
// Charts we fetch from an upstream with real versions.
coreDNSInfo = chartInfo{releaseName: "coredns", chartName: "coredns", path: "charts/coredns"}
ciliumInfo = chartInfo{releaseName: "cilium", chartName: "cilium", path: "charts/cilium"}
certManagerInfo = chartInfo{releaseName: "cert-manager", chartName: "cert-manager", path: "charts/cert-manager"}
awsLBControllerInfo = chartInfo{releaseName: "aws-load-balancer-controller", chartName: "aws-load-balancer-controller", path: "charts/aws-load-balancer-controller"}
@@ -124,7 +126,7 @@ type OpenStackValues struct {
// loadReleases loads the embedded helm charts and returns them as a HelmReleases object.
func (i *chartLoader) loadReleases(conformanceMode, deployCSIDriver bool, helmWaitMode WaitMode, masterSecret uri.MasterSecret,
serviceAccURI string, openStackValues *OpenStackValues,
serviceAccURI string, openStackValues *OpenStackValues, serviceCIDR string,
) (releaseApplyOrder, error) {
ciliumRelease, err := i.loadRelease(ciliumInfo, helmWaitMode)
if err != nil {
@@ -133,6 +135,16 @@ func (i *chartLoader) loadReleases(conformanceMode, deployCSIDriver bool, helmWa
ciliumVals := extraCiliumValues(i.csp, conformanceMode, i.stateFile.Infrastructure)
ciliumRelease.values = mergeMaps(ciliumRelease.values, ciliumVals)

coreDNSRelease, err := i.loadRelease(coreDNSInfo, helmWaitMode)
if err != nil {
return nil, fmt.Errorf("loading coredns: %w", err)
}
coreDNSVals, err := extraCoreDNSValues(serviceCIDR)
if err != nil {
return nil, fmt.Errorf("loading coredns values: %w", err)
}
coreDNSRelease.values = mergeMaps(coreDNSRelease.values, coreDNSVals)

certManagerRelease, err := i.loadRelease(certManagerInfo, helmWaitMode)
if err != nil {
return nil, fmt.Errorf("loading cert-manager: %w", err)
@@ -156,7 +168,7 @@ func (i *chartLoader) loadReleases(conformanceMode, deployCSIDriver bool, helmWa
}
conServicesRelease.values = mergeMaps(conServicesRelease.values, svcVals)

releases := releaseApplyOrder{ciliumRelease, conServicesRelease, certManagerRelease, operatorRelease}
releases := releaseApplyOrder{ciliumRelease, coreDNSRelease, conServicesRelease, certManagerRelease, operatorRelease}
if deployCSIDriver {
csiRelease, err := i.loadRelease(csiInfo, WaitModeNone)
if err != nil {
@@ -224,6 +236,8 @@ func (i *chartLoader) loadRelease(info chartInfo, helmWaitMode WaitMode) (releas
values = i.loadAWSLBControllerValues()
case csiInfo.releaseName:
values = i.loadCSIValues()
default:
values = map[string]any{}
}

// Charts we package ourselves have version 0.0.0.
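The new default arm ensures every release starts from a non-nil values map before the loader merges in extra values (e.g. the CoreDNS clusterIP above). mergeMaps is an existing helper in this package; a plausible shape for such a recursive merge — a sketch under that assumption, not the project's actual implementation — is:

```go
package helm

// mergeMaps returns a copy of a with b merged over it: scalar values in b
// win, and nested map[string]any values are merged recursively.
func mergeMaps(a, b map[string]any) map[string]any {
	out := make(map[string]any, len(a))
	for k, v := range a {
		out[k] = v
	}
	for k, v := range b {
		if bMap, ok := v.(map[string]any); ok {
			if aMap, ok := out[k].(map[string]any); ok {
				out[k] = mergeMaps(aMap, bMap)
				continue
			}
		}
		out[k] = v
	}
	return out
}
```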

View file

@@ -94,7 +94,7 @@ func TestLoadReleases(t *testing.T) {
helmReleases, err := chartLoader.loadReleases(
true, false, WaitModeAtomic,
uri.MasterSecret{Key: []byte("secret"), Salt: []byte("masterSalt")},
fakeServiceAccURI(cloudprovider.GCP), nil,
fakeServiceAccURI(cloudprovider.GCP), nil, "172.16.128.0/17",
)
require.NoError(err)
for _, release := range helmReleases {
@@ -260,6 +260,55 @@ func TestConstellationServices(t *testing.T) {
}
}

func TestExtraCoreDNSValues(t *testing.T) {
testCases := map[string]struct {
cidr string
wantIP string
wantUnset bool
wantErr bool
}{
"default": {
cidr: "10.96.0.0/12",
wantIP: "10.96.0.10",
},
"custom": {
cidr: "172.16.128.0/17",
wantIP: "172.16.128.10",
},
"too small": {
cidr: "172.16.0.0/30",
wantErr: true,
},
"bad ip": {
cidr: "cluster.local",
wantErr: true,
},
"v6": {
cidr: "fd12:3456:789a:100::/56",
wantIP: "fd12:3456:789a:100::a",
},
"no ip": {
wantUnset: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
values, err := extraCoreDNSValues(tc.cidr)
if tc.wantErr {
assert.Error(t, err)
return
}
ip, ok := values["clusterIP"]
if tc.wantUnset {
assert.False(t, ok)
return
}
assert.Equal(t, tc.wantIP, ip)
})
}
}

// TestOperators checks if the rendered constellation-services chart produces the expected yaml files.
func TestOperators(t *testing.T) {
testCases := map[string]struct {

View file

@@ -21,8 +21,21 @@ import (
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/constellation/state"
"github.com/edgelesssys/constellation/v2/internal/kms/uri"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)

func extraCoreDNSValues(serviceCIDR string) (map[string]any, error) {
if serviceCIDR == "" {
return map[string]any{}, nil
}
ip, err := kubeadmconstants.GetDNSIP(serviceCIDR)
if err != nil {
return nil, fmt.Errorf("calculating DNS service IP: %w", err)
}
return map[string]any{"clusterIP": ip.String()}, nil
}

// TODO(malt3): switch over to DNS name on AWS and Azure
// soon as every apiserver certificate of every control-plane node
// has the dns endpoint in its SAN list.
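kubeadm's convention, which extraCoreDNSValues relies on, is to reserve the tenth address of the service subnet for cluster DNS — hence the 10.96.0.10 default for 10.96.0.0/12 in the tests above. A standalone illustration, assuming the single-argument GetDNSIP signature used in this diff:

```go
package main

import (
	"fmt"
	"log"

	kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)

func main() {
	// kubeadm reserves the 10th IP of the service CIDR for kube-dns,
	// e.g. 10.96.0.0/12 -> 10.96.0.10 (the chart's default clusterIP).
	ip, err := kubeadmconstants.GetDNSIP("10.96.0.0/12")
	if err != nil {
		log.Fatalf("calculating DNS service IP: %v", err)
	}
	fmt.Println(ip) // 10.96.0.10
}
```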

View file

@@ -188,65 +188,6 @@ func (k *Kubectl) PatchFirstNodePodCIDR(ctx context.Context, firstNodePodCIDR st
return err
}

// EnforceCoreDNSSpread adds a pod anti-affinity to the CoreDNS deployment to ensure that
// CoreDNS pods are spread across nodes.
func (k *Kubectl) EnforceCoreDNSSpread(ctx context.Context) error {
// allow CoreDNS Pods to run on uninitialized nodes, which is required by cloud-controller-manager
tolerationSeconds := int64(10)
tolerations := []corev1.Toleration{
{
Key: "node.cloudprovider.kubernetes.io/uninitialized",
Value: "true",
Effect: corev1.TaintEffectNoSchedule,
},
{
Key: "node.kubernetes.io/unreachable",
Operator: corev1.TolerationOpExists,
Effect: corev1.TaintEffectNoExecute,
TolerationSeconds: &tolerationSeconds,
},
}
deployments := k.AppsV1().Deployments("kube-system")
// retry resource update if an error occurs
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
result, err := deployments.Get(ctx, "coredns", metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get Deployment to add toleration: %w", err)
}
result.Spec.Template.Spec.Tolerations = append(result.Spec.Template.Spec.Tolerations, tolerations...)
if result.Spec.Template.Spec.Affinity == nil {
result.Spec.Template.Spec.Affinity = &corev1.Affinity{}
}
if result.Spec.Template.Spec.Affinity.PodAntiAffinity == nil {
result.Spec.Template.Spec.Affinity.PodAntiAffinity = &corev1.PodAntiAffinity{}
}
result.Spec.Template.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []corev1.WeightedPodAffinityTerm{}
if result.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
result.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = []corev1.PodAffinityTerm{}
}
result.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(result.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
corev1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "k8s-app",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"kube-dns"},
},
},
},
TopologyKey: "kubernetes.io/hostname",
})
_, err = deployments.Update(ctx, result, metav1.UpdateOptions{})
return err
})
}

// AddNodeSelectorsToDeployment adds [K8s selectors] to the deployment, identified
// by name and namespace.
//