AB#2490: deploy KMS via Helm

* Bundle helm-install related code in separate package
* Move cilium installation to new helm package
Otto Bittner 2022-10-18 13:15:54 +02:00
parent 62168bbf98
commit c6ccee1250
17 changed files with 311 additions and 495 deletions
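For orientation, a minimal sketch of what the bundled helm package's client setup might look like. The initialization sequence mirrors the code removed from SetupHelmDeployments below; the package name, the Client type, and the New constructor are assumptions based on the commit message, not code shown in this diff.

// Hypothetical sketch of the consolidated helm-install package. Names are
// assumptions; the setup mirrors the removed SetupHelmDeployments below.
package helm

import (
	"fmt"
	"time"

	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/cli"
)

const (
	// kubeConfig is the path to the Kubernetes admin config (used for authentication).
	kubeConfig = "/etc/kubernetes/admin.conf"
	// helmTimeout is the maximum time given to the helm client.
	helmTimeout = 5 * time.Minute
)

// Client wraps Helm's action configuration for in-cluster installs.
type Client struct {
	config *action.Configuration
}

// New initializes a Helm action configuration against the admin kubeconfig.
func New(namespace string, log action.DebugLog) (*Client, error) {
	settings := cli.New()
	settings.KubeConfig = kubeConfig

	config := new(action.Configuration)
	if err := config.Init(settings.RESTClientGetter(), namespace, "secret", log); err != nil {
		return nil, fmt.Errorf("initializing helm client: %w", err)
	}
	return &Client{config: config}, nil
}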


@@ -12,7 +12,6 @@ const (
binDir = "/run/state/bin"
kubeadmPath = "/run/state/bin/kubeadm"
kubeletPath = "/run/state/bin/kubelet"
-kubectlPath = "/run/state/bin/kubectl"
kubeletServiceEtcPath = "/etc/systemd/system/kubelet.service"
kubeletServiceStatePath = "/run/state/systemd/system/kubelet.service"
kubeadmConfEtcPath = "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"


@@ -10,7 +10,6 @@ import (
"context"
"crypto/rand"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
@@ -32,7 +31,6 @@ import (
kubeconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"github.com/edgelesssys/constellation/v2/internal/crypto"
"github.com/edgelesssys/constellation/v2/internal/deploy/helm"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/versions"
@@ -40,20 +38,14 @@ import (
"github.com/spf13/afero"
"go.uber.org/zap"
"golang.org/x/text/transform"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/cli"
corev1 "k8s.io/api/core/v1"
)
const (
// kubeConfig is the path to the Kubernetes admin config (used for authentication).
kubeConfig = "/etc/kubernetes/admin.conf"
// kubeletStartTimeout is the maximum time given to the kubelet service to (re)start.
kubeletStartTimeout = 10 * time.Minute
// crdTimeout is the maximum time given to the CRDs to be created.
crdTimeout = 30 * time.Second
-// helmTimeout is the maximum time given to the helm client.
-helmTimeout = 5 * time.Minute
)
// Client provides the functions to talk to the k8s API.
@@ -122,7 +114,7 @@ func (k *KubernetesUtil) InstallComponents(ctx context.Context, version versions
return fmt.Errorf("installing kubeadm: %w", err)
}
if err := k.inst.Install(
-ctx, versionConf.KubectlURL, []string{kubectlPath}, executablePerm, false,
+ctx, versionConf.KubectlURL, []string{constants.KubectlPath}, executablePerm, false,
); err != nil {
return fmt.Errorf("installing kubectl: %w", err)
}
@@ -235,19 +227,19 @@ func (k *KubernetesUtil) prepareControlPlaneForKonnectivity(ctx context.Context,
return fmt.Errorf("generating konnectivity server certificate: %w", err)
}
if out, err := exec.CommandContext(ctx, kubectlPath, "config", "set-credentials", "--kubeconfig", "/etc/kubernetes/konnectivity-server.conf", "system:konnectivity-server",
if out, err := exec.CommandContext(ctx, constants.KubectlPath, "config", "set-credentials", "--kubeconfig", "/etc/kubernetes/konnectivity-server.conf", "system:konnectivity-server",
"--client-certificate", "/etc/kubernetes/konnectivity.crt", "--client-key", "/etc/kubernetes/konnectivity.key", "--embed-certs=true").CombinedOutput(); err != nil {
return fmt.Errorf("konnectivity kubeconfig set-credentials: %w, %s", err, string(out))
}
if out, err := exec.CommandContext(ctx, kubectlPath, "--kubeconfig", "/etc/kubernetes/konnectivity-server.conf", "config", "set-cluster", "kubernetes", "--server", "https://"+loadBalancerEndpoint,
if out, err := exec.CommandContext(ctx, constants.KubectlPath, "--kubeconfig", "/etc/kubernetes/konnectivity-server.conf", "config", "set-cluster", "kubernetes", "--server", "https://"+loadBalancerEndpoint,
"--certificate-authority", "/etc/kubernetes/pki/ca.crt", "--embed-certs=true").CombinedOutput(); err != nil {
return fmt.Errorf("konnectivity kubeconfig set-cluster: %w, %s", err, string(out))
}
if out, err := exec.CommandContext(ctx, kubectlPath, "--kubeconfig", "/etc/kubernetes/konnectivity-server.conf", "config", "set-context", "system:konnectivity-server@kubernetes",
if out, err := exec.CommandContext(ctx, constants.KubectlPath, "--kubeconfig", "/etc/kubernetes/konnectivity-server.conf", "config", "set-context", "system:konnectivity-server@kubernetes",
"--cluster", "kubernetes", "--user", "system:konnectivity-server").CombinedOutput(); err != nil {
return fmt.Errorf("konnectivity kubeconfig set-context: %w, %s", err, string(out))
}
if out, err := exec.CommandContext(ctx, kubectlPath, "--kubeconfig", "/etc/kubernetes/konnectivity-server.conf", "config", "use-context", "system:konnectivity-server@kubernetes").CombinedOutput(); err != nil {
if out, err := exec.CommandContext(ctx, constants.KubectlPath, "--kubeconfig", "/etc/kubernetes/konnectivity-server.conf", "config", "use-context", "system:konnectivity-server@kubernetes").CombinedOutput(); err != nil {
return fmt.Errorf("konnectivity kubeconfig use-context: %w, %s", err, string(out))
}
// cleanup
@@ -264,33 +256,6 @@ func (k *KubernetesUtil) SetupKonnectivity(kubectl Client, konnectivityAgentsDae
return kubectl.Apply(konnectivityAgentsDaemonSet, true)
}
-func (k *KubernetesUtil) SetupHelmDeployments(ctx context.Context, kubectl Client, helmDeployments []byte, in SetupPodNetworkInput, log *logger.Logger) error {
-var helmDeploy helm.Deployments
-if err := json.Unmarshal(helmDeployments, &helmDeploy); err != nil {
-return fmt.Errorf("unmarshalling helm deployments: %w", err)
-}
-settings := cli.New()
-settings.KubeConfig = kubeConfig
-actionConfig := new(action.Configuration)
-if err := actionConfig.Init(settings.RESTClientGetter(), constants.HelmNamespace,
-"secret", log.Infof); err != nil {
-return err
-}
-helmClient := action.NewInstall(actionConfig)
-helmClient.Namespace = constants.HelmNamespace
-helmClient.ReleaseName = "cilium"
-helmClient.Wait = true
-helmClient.Timeout = helmTimeout
-if err := k.deployCilium(ctx, in, helmClient, helmDeploy.Cilium, kubectl); err != nil {
-return fmt.Errorf("deploying cilium: %w", err)
-}
-return nil
-}
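The release settings removed above (constants.HelmNamespace, release name "cilium", Wait, helmTimeout) presumably move behind the new package's API. A hedged sketch of the relocated install entry point, continuing the hypothetical Client from the sketch near the top; the method name is an assumption, while the action.Install usage mirrors the removed code:

// Hypothetical sketch: the cilium release install after the move to the
// helm package. ciliumChart is a *chart.Chart (helm.sh/helm/v3/pkg/chart).
func (c *Client) InstallCilium(ctx context.Context, ciliumChart *chart.Chart, vals map[string]interface{}) error {
	// Release settings copied from the removed SetupHelmDeployments above.
	installer := action.NewInstall(c.config)
	installer.Namespace = constants.HelmNamespace
	installer.ReleaseName = "cilium"
	installer.Wait = true
	installer.Timeout = helmTimeout

	if _, err := installer.RunWithContext(ctx, ciliumChart, vals); err != nil {
		return fmt.Errorf("installing cilium: %w", err)
	}
	return nil
}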
type SetupPodNetworkInput struct {
CloudProvider string
NodeName string
@@ -300,85 +265,6 @@ type SetupPodNetworkInput struct {
LoadBalancerEndpoint string
}
-// deployCilium sets up the cilium pod network.
-func (k *KubernetesUtil) deployCilium(ctx context.Context, in SetupPodNetworkInput, helmClient *action.Install, ciliumDeployment helm.Deployment, kubectl Client) error {
-switch in.CloudProvider {
-case "gcp":
-return k.deployCiliumGCP(ctx, helmClient, kubectl, ciliumDeployment, in.NodeName, in.FirstNodePodCIDR, in.SubnetworkPodCIDR, in.LoadBalancerEndpoint)
-case "azure":
-return k.deployCiliumAzure(ctx, helmClient, ciliumDeployment, in.LoadBalancerEndpoint)
-case "qemu":
-return k.deployCiliumQEMU(ctx, helmClient, ciliumDeployment, in.SubnetworkPodCIDR, in.LoadBalancerEndpoint)
-default:
-return fmt.Errorf("unsupported cloud provider %q", in.CloudProvider)
-}
-}
-func (k *KubernetesUtil) deployCiliumAzure(ctx context.Context, helmClient *action.Install, ciliumDeployment helm.Deployment, kubeAPIEndpoint string) error {
-host := kubeAPIEndpoint
-ciliumDeployment.Values["k8sServiceHost"] = host
-ciliumDeployment.Values["k8sServicePort"] = strconv.Itoa(constants.KubernetesPort)
-_, err := helmClient.RunWithContext(ctx, ciliumDeployment.Chart, ciliumDeployment.Values)
-if err != nil {
-return fmt.Errorf("installing cilium: %w", err)
-}
-return nil
-}
-func (k *KubernetesUtil) deployCiliumGCP(ctx context.Context, helmClient *action.Install, kubectl Client, ciliumDeployment helm.Deployment, nodeName, nodePodCIDR, subnetworkPodCIDR, kubeAPIEndpoint string) error {
-out, err := exec.CommandContext(ctx, kubectlPath, "--kubeconfig", kubeConfig, "patch", "node", nodeName, "-p", "{\"spec\":{\"podCIDR\": \""+nodePodCIDR+"\"}}").CombinedOutput()
-if err != nil {
-err = errors.New(string(out))
-return err
-}
-timeoutS := int64(10)
-// allow coredns to run on uninitialized nodes (required by cloud-controller-manager)
-tolerations := []corev1.Toleration{
-{
-Key: "node.cloudprovider.kubernetes.io/uninitialized",
-Value: "true",
-Effect: corev1.TaintEffectNoSchedule,
-},
-{
-Key: "node.kubernetes.io/unreachable",
-Operator: corev1.TolerationOpExists,
-Effect: corev1.TaintEffectNoExecute,
-TolerationSeconds: &timeoutS,
-},
-}
-if err = kubectl.AddTolerationsToDeployment(ctx, tolerations, "coredns", "kube-system"); err != nil {
-return err
-}
-selectors := map[string]string{
-"node-role.kubernetes.io/control-plane": "",
-}
-if err = kubectl.AddNodeSelectorsToDeployment(ctx, selectors, "coredns", "kube-system"); err != nil {
-return err
-}
-host, port, err := net.SplitHostPort(kubeAPIEndpoint)
-if err != nil {
-return err
-}
-// configure pod network CIDR
-ciliumDeployment.Values["ipv4NativeRoutingCIDR"] = subnetworkPodCIDR
-ciliumDeployment.Values["strictModeCIDR"] = subnetworkPodCIDR
-ciliumDeployment.Values["k8sServiceHost"] = host
-if port != "" {
-ciliumDeployment.Values["k8sServicePort"] = port
-}
-_, err = helmClient.RunWithContext(ctx, ciliumDeployment.Chart, ciliumDeployment.Values)
-if err != nil {
-return fmt.Errorf("installing cilium: %w", err)
-}
-return nil
-}
// FixCilium fixes https://github.com/cilium/cilium/issues/19958 but instead of a rollout restart of
// the cilium daemonset, it only restarts the local cilium pod.
func (k *KubernetesUtil) FixCilium(log *logger.Logger) {
@@ -423,26 +309,6 @@ func (k *KubernetesUtil) FixCilium(log *logger.Logger) {
}
}
-func (k *KubernetesUtil) deployCiliumQEMU(ctx context.Context, helmClient *action.Install, ciliumDeployment helm.Deployment, subnetworkPodCIDR, kubeAPIEndpoint string) error {
-// configure pod network CIDR
-ciliumDeployment.Values["ipam"] = map[string]interface{}{
-"operator": map[string]interface{}{
-"clusterPoolIPv4PodCIDRList": []interface{}{
-subnetworkPodCIDR,
-},
-},
-}
-ciliumDeployment.Values["k8sServiceHost"] = kubeAPIEndpoint
-ciliumDeployment.Values["k8sServicePort"] = strconv.Itoa(constants.KubernetesPort)
-_, err := helmClient.RunWithContext(ctx, ciliumDeployment.Chart, ciliumDeployment.Values)
-if err != nil {
-return fmt.Errorf("installing cilium: %w", err)
-}
-return nil
-}
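Worth noting how these nested maps relate to the chart's values file: each map[string]interface{} level corresponds to one YAML mapping level. A worked example with placeholder inputs (the CIDR and endpoint below are illustrative, not from this commit):

// The values built in the removed deployCiliumQEMU above, with placeholder
// inputs; equivalent to passing
// --set ipam.operator.clusterPoolIPv4PodCIDRList={10.244.0.0/16}
// plus k8sServiceHost/k8sServicePort to the cilium chart.
vals := map[string]interface{}{
	"ipam": map[string]interface{}{
		"operator": map[string]interface{}{
			"clusterPoolIPv4PodCIDRList": []interface{}{"10.244.0.0/16"},
		},
	},
	"k8sServiceHost": "10.0.0.1", // placeholder endpoint
	"k8sServicePort": "6443",     // strconv.Itoa(constants.KubernetesPort)
}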
// SetupAutoscaling deploys the k8s cluster autoscaler.
func (k *KubernetesUtil) SetupAutoscaling(kubectl Client, clusterAutoscalerConfiguration kubernetes.Marshaler, secrets kubernetes.Marshaler) error {
if err := kubectl.Apply(secrets, true); err != nil {
@@ -485,14 +351,6 @@ func (k *KubernetesUtil) SetupAccessManager(kubectl Client, accessManagerConfigu
return kubectl.Apply(accessManagerConfiguration, true)
}
-// SetupKMS deploys the KMS deployment.
-func (k *KubernetesUtil) SetupKMS(kubectl Client, kmsConfiguration kubernetes.Marshaler) error {
-if err := kubectl.Apply(kmsConfiguration, true); err != nil {
-return fmt.Errorf("applying KMS configuration: %w", err)
-}
-return nil
-}
// SetupVerificationService deploys the verification service.
func (k *KubernetesUtil) SetupVerificationService(kubectl Client, verificationServiceConfiguration kubernetes.Marshaler) error {
return kubectl.Apply(verificationServiceConfiguration, true)
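Per the commit title, the manifest-apply path removed above gives way to a Helm release for the KMS. A minimal sketch of what that could look like through the same action API, assuming the hypothetical Client from earlier; the chart path, release name, and values handling are assumptions, since the new code is not part of the hunks shown here:

// Hypothetical sketch: KMS deployed as a Helm release instead of applied
// manifests. loader is helm.sh/helm/v3/pkg/chart/loader; the chart path is
// made up for illustration.
func (c *Client) InstallKMS(ctx context.Context, vals map[string]interface{}) error {
	kmsChart, err := loader.Load("/opt/charts/kms")
	if err != nil {
		return fmt.Errorf("loading KMS chart: %w", err)
	}

	installer := action.NewInstall(c.config)
	installer.Namespace = "kube-system" // kmsNamespace in the deleted kms.go below
	installer.ReleaseName = "kms"
	installer.Wait = true
	installer.Timeout = helmTimeout

	if _, err := installer.RunWithContext(ctx, kmsChart, vals); err != nil {
		return fmt.Errorf("installing KMS release: %w", err)
	}
	return nil
}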


@@ -1,259 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package resources
import (
"fmt"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/kubernetes"
"github.com/edgelesssys/constellation/v2/internal/versions"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
const kmsNamespace = "kube-system"
type KMSDeployment struct {
ServiceAccount k8s.ServiceAccount
Service k8s.Service
ClusterRole rbac.ClusterRole
ClusterRoleBinding rbac.ClusterRoleBinding
Deployment apps.DaemonSet
MasterSecret k8s.Secret
}
// KMSConfig is the configuration needed to set up Constellation's key management service.
type KMSConfig struct {
MasterSecret []byte
Salt []byte
KMSURI string
StorageURI string
KeyEncryptionKeyID string
UseExistingKEK bool
}
// NewKMSDeployment creates a new *kmsDeployment to use as the key management system inside Constellation.
func NewKMSDeployment(csp string, config KMSConfig) *KMSDeployment {
return &KMSDeployment{
ServiceAccount: k8s.ServiceAccount{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: meta.ObjectMeta{
Name: "kms",
Namespace: kmsNamespace,
},
},
Service: k8s.Service{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: meta.ObjectMeta{
Name: "kms",
Namespace: kmsNamespace,
},
Spec: k8s.ServiceSpec{
Type: k8s.ServiceTypeClusterIP,
Ports: []k8s.ServicePort{
{
Name: "grpc",
Protocol: k8s.ProtocolTCP,
Port: constants.KMSPort,
TargetPort: intstr.FromInt(constants.KMSPort),
},
},
Selector: map[string]string{
"k8s-app": "kms",
},
},
},
ClusterRole: rbac.ClusterRole{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: meta.ObjectMeta{
Name: "kms",
Labels: map[string]string{
"k8s-app": "kms",
},
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"secrets"},
Verbs: []string{"get"},
},
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: meta.ObjectMeta{
Name: "kms",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "kms",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "kms",
Namespace: kmsNamespace,
},
},
},
Deployment: apps.DaemonSet{
TypeMeta: meta.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "kms",
"component": "kms",
"kubernetes.io/cluster-service": "true",
},
Name: "kms",
Namespace: kmsNamespace,
},
Spec: apps.DaemonSetSpec{
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "kms",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "kms",
},
},
Spec: k8s.PodSpec{
PriorityClassName: "system-cluster-critical",
Tolerations: []k8s.Toleration{
{
Key: "CriticalAddonsOnly",
Operator: k8s.TolerationOpExists,
},
{
Key: "node-role.kubernetes.io/master",
Operator: k8s.TolerationOpEqual,
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoExecute,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
},
// Only run on control plane nodes
NodeSelector: map[string]string{
"node-role.kubernetes.io/control-plane": "",
},
Volumes: []k8s.Volume{
{
Name: "config",
VolumeSource: k8s.VolumeSource{
Projected: &k8s.ProjectedVolumeSource{
Sources: []k8s.VolumeProjection{
{
ConfigMap: &k8s.ConfigMapProjection{
LocalObjectReference: k8s.LocalObjectReference{
Name: "join-config",
},
Items: []k8s.KeyToPath{
{
Key: constants.MeasurementsFilename,
Path: constants.MeasurementsFilename,
},
},
},
},
{
Secret: &k8s.SecretProjection{
LocalObjectReference: k8s.LocalObjectReference{
Name: constants.ConstellationMasterSecretStoreName,
},
Items: []k8s.KeyToPath{
{
Key: constants.ConstellationMasterSecretKey,
Path: constants.ConstellationMasterSecretKey,
},
{
Key: constants.ConstellationMasterSecretSalt,
Path: constants.ConstellationMasterSecretSalt,
},
},
},
},
},
},
},
},
},
ServiceAccountName: "kms",
Containers: []k8s.Container{
{
Name: "kms",
Image: versions.KmsImage,
Args: []string{
fmt.Sprintf("--port=%d", constants.KMSPort),
},
VolumeMounts: []k8s.VolumeMount{
{
Name: "config",
ReadOnly: true,
MountPath: constants.ServiceBasePath,
},
},
},
},
},
},
},
},
MasterSecret: k8s.Secret{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: meta.ObjectMeta{
Name: constants.ConstellationMasterSecretStoreName,
Namespace: kmsNamespace,
},
Data: map[string][]byte{
constants.ConstellationMasterSecretKey: config.MasterSecret,
constants.ConstellationMasterSecretSalt: config.Salt,
},
Type: "Opaque",
},
}
}
func (c *KMSDeployment) Marshal() ([]byte, error) {
return kubernetes.MarshalK8SResources(c)
}
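Under the Helm model, the resources above (service account, service, RBAC, daemon set, master-secret) presumably become chart templates, with KMSConfig feeding the release values. A hedged sketch of that mapping; the value keys are assumptions, while the field names come from the KMSConfig struct above:

import "encoding/base64"

// Hypothetical mapping from the deleted KMSConfig to Helm release values.
// Binary fields are base64-encoded so they stay YAML/JSON-representable.
func kmsHelmValues(config KMSConfig) map[string]interface{} {
	return map[string]interface{}{
		"masterSecret":       base64.StdEncoding.EncodeToString(config.MasterSecret),
		"salt":               base64.StdEncoding.EncodeToString(config.Salt),
		"kmsURI":             config.KMSURI,
		"storageURI":         config.StorageURI,
		"keyEncryptionKeyID": config.KeyEncryptionKeyID,
		"useExistingKEK":     config.UseExistingKEK,
	}
}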


@@ -1,28 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package resources
import (
"testing"
"github.com/edgelesssys/constellation/v2/internal/kubernetes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestKMSMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
kmsDepl := NewKMSDeployment("test", KMSConfig{MasterSecret: []byte{0x0, 0x1, 0x2}, Salt: []byte{0x3, 0x4, 0x5}})
data, err := kmsDepl.Marshal()
require.NoError(err)
var recreated KMSDeployment
require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
assert.Equal(kmsDepl, &recreated)
}
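Equivalent coverage for the deleted marshal round-trip would presumably assert that the assumed KMS chart renders. A client-only dry-run sketch in the style of the removed test; the chart path and the kmsHelmValues helper are hypothetical, and ClientOnly lets action.Install render templates without contacting a cluster:

// Hypothetical chart-rendering test replacing the marshal round-trip above.
func TestKMSChartRenders(t *testing.T) {
	require := require.New(t)
	assert := assert.New(t)

	kmsChart, err := loader.Load("/opt/charts/kms") // hypothetical chart path
	require.NoError(err)

	installer := action.NewInstall(&action.Configuration{})
	installer.DryRun = true
	installer.ClientOnly = true // render client-side, no cluster contact
	installer.ReleaseName = "kms"
	installer.Namespace = "kube-system"

	rel, err := installer.Run(kmsChart, kmsHelmValues(KMSConfig{MasterSecret: []byte{0x0, 0x1, 0x2}, Salt: []byte{0x3, 0x4, 0x5}}))
	require.NoError(err)
	assert.NotEmpty(rel.Manifest)
}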