Rename coordinator to bootstrapper and rename roles

katexochen 2022-06-29 15:26:29 +02:00 committed by Paul Meyer
parent 3280ed200c
commit 916e5d6b55
191 changed files with 1763 additions and 2030 deletions


@@ -0,0 +1,201 @@
package resources
import (
"github.com/edgelesssys/constellation/internal/secrets"
"google.golang.org/protobuf/proto"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// accessManagerDeployment holds the configuration for the SSH user creation pods. User/Key definitions are stored in the ConfigMap, and the manager is deployed on each node by the DaemonSet.
type accessManagerDeployment struct {
ConfigMap k8s.ConfigMap
ServiceAccount k8s.ServiceAccount
Role rbac.Role
RoleBinding rbac.RoleBinding
DaemonSet apps.DaemonSet
ImagePullSecret k8s.Secret
}
// NewAccessManagerDeployment creates a new *accessManagerDeployment which manages the SSH users for the cluster.
func NewAccessManagerDeployment(sshUsers map[string]string) *accessManagerDeployment {
return &accessManagerDeployment{
ServiceAccount: k8s.ServiceAccount{
TypeMeta: v1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "constellation-access-manager",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-access-manager",
Namespace: "kube-system",
},
AutomountServiceAccountToken: proto.Bool(true),
},
ConfigMap: k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: v1.ObjectMeta{
Name: "ssh-users",
Namespace: "kube-system",
},
Data: sshUsers,
},
DaemonSet: apps.DaemonSet{
TypeMeta: v1.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: v1.ObjectMeta{
Name: "constellation-access-manager",
Namespace: "kube-system",
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "constellation-access-manager",
},
},
Spec: apps.DaemonSetSpec{
Selector: &v1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "constellation-access-manager",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "constellation-access-manager",
},
},
Spec: k8s.PodSpec{
Tolerations: []k8s.Toleration{
{
Key: "node-role.kubernetes.io/master",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
},
ImagePullSecrets: []k8s.LocalObjectReference{
{
Name: secrets.PullSecretName,
},
},
Containers: []k8s.Container{
{
Name: "pause",
Image: "gcr.io/google_containers/pause",
ImagePullPolicy: k8s.PullIfNotPresent,
},
},
InitContainers: []k8s.Container{
{
Name: "constellation-access-manager",
Image: accessManagerImage,
VolumeMounts: []k8s.VolumeMount{
{
Name: "host",
MountPath: "/host",
},
},
SecurityContext: &k8s.SecurityContext{
Capabilities: &k8s.Capabilities{
Add: []k8s.Capability{
"SYS_CHROOT",
},
},
},
},
},
ServiceAccountName: "constellation-access-manager",
Volumes: []k8s.Volume{
{
Name: "host",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{
Path: "/",
},
},
},
},
},
},
},
},
Role: rbac.Role{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "Role",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "constellation-access-manager",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-access-manager",
Namespace: "kube-system",
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{
"configmaps",
},
ResourceNames: []string{
"ssh-users",
},
Verbs: []string{
"get",
},
},
},
},
RoleBinding: rbac.RoleBinding{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "RoleBinding",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "constellation-access-manager",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-access-manager",
Namespace: "kube-system",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: "constellation-access-manager",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "constellation-access-manager",
Namespace: "kube-system",
},
},
},
ImagePullSecret: NewImagePullSecret(),
}
}
// Marshal marshals the access-manager deployment as YAML documents.
func (c *accessManagerDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(c)
}
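
As a usage sketch (not part of this commit, and assuming it sits in the same resources package), the access-manager manifests could be rendered like this; the user name and public key are placeholders:

// exampleAccessManagerYAML is a hypothetical helper: it renders the
// access-manager resources for a single SSH user as multi-document YAML.
func exampleAccessManagerYAML() ([]byte, error) {
    sshUsers := map[string]string{
        // user name -> authorized public key (placeholder value)
        "demo-user": "ssh-ed25519 AAAAC3Nza... demo-user@example.com",
    }
    deployment := NewAccessManagerDeployment(sshUsers)
    // Marshal emits one YAML document per field: ConfigMap, ServiceAccount,
    // Role, RoleBinding, DaemonSet, and ImagePullSecret.
    return deployment.Marshal()
}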


@@ -0,0 +1,37 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestAccessManagerMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
// Without data
accessManagerDeplNil := NewAccessManagerDeployment(nil)
data, err := accessManagerDeplNil.Marshal()
require.NoError(err)
var recreated accessManagerDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(accessManagerDeplNil, &recreated)
// With data
sshUsers := make(map[string]string)
sshUsers["test-user"] = "ssh-rsa abcdefg"
accessManagerDeplNil = NewAccessManagerDeployment(sshUsers)
data, err = accessManagerDeplNil.Marshal()
require.NoError(err)
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(accessManagerDeplNil, &recreated)
}


@@ -0,0 +1,245 @@
package resources
import (
"fmt"
"github.com/edgelesssys/constellation/internal/constants"
"github.com/edgelesssys/constellation/internal/secrets"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
type activationDaemonset struct {
ClusterRole rbac.ClusterRole
ClusterRoleBinding rbac.ClusterRoleBinding
ConfigMap k8s.ConfigMap
DaemonSet apps.DaemonSet
ServiceAccount k8s.ServiceAccount
Service k8s.Service
}
// NewActivationDaemonset returns a daemonset for the activation service.
func NewActivationDaemonset(csp, measurementsJSON, idJSON string) *activationDaemonset {
return &activationDaemonset{
ClusterRole: rbac.ClusterRole{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: meta.ObjectMeta{
Name: "activation-service",
Labels: map[string]string{
"k8s-app": "activation-service",
},
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"secrets"},
Verbs: []string{"get", "list", "create", "update"},
},
{
APIGroups: []string{"rbac.authorization.k8s.io"},
Resources: []string{"roles", "rolebindings"},
Verbs: []string{"create", "update"},
},
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: meta.ObjectMeta{
Name: "activation-service",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "activation-service",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "activation-service",
Namespace: "kube-system",
},
},
},
DaemonSet: apps.DaemonSet{
TypeMeta: meta.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: meta.ObjectMeta{
Name: "activation-service",
Namespace: "kube-system",
Labels: map[string]string{
"k8s-app": "activation-service",
"component": "activation-service",
"kubernetes.io/cluster-service": "true",
},
},
Spec: apps.DaemonSetSpec{
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "activation-service",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "activation-service",
},
},
Spec: k8s.PodSpec{
PriorityClassName: "system-cluster-critical",
ServiceAccountName: "activation-service",
Tolerations: []k8s.Toleration{
{
Key: "CriticalAddonsOnly",
Operator: k8s.TolerationOpExists,
},
{
Key: "node-role.kubernetes.io/master",
Operator: k8s.TolerationOpEqual,
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoExecute,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
},
// Only run on control plane nodes
NodeSelector: map[string]string{
"node-role.kubernetes.io/master": "",
},
ImagePullSecrets: []k8s.LocalObjectReference{
{
Name: secrets.PullSecretName,
},
},
Containers: []k8s.Container{
{
Name: "activation-service",
Image: activationImage,
Ports: []k8s.ContainerPort{
{
ContainerPort: constants.ActivationServicePort,
Name: "tcp",
},
},
SecurityContext: &k8s.SecurityContext{
Privileged: func(b bool) *bool { return &b }(true),
},
Args: []string{
fmt.Sprintf("--cloud-provider=%s", csp),
fmt.Sprintf("--kms-endpoint=kms.kube-system:%d", constants.KMSPort),
},
VolumeMounts: []k8s.VolumeMount{
{
Name: "config",
ReadOnly: true,
MountPath: constants.ServiceBasePath,
},
{
Name: "kubeadm",
ReadOnly: true,
MountPath: "/etc/kubernetes",
},
},
},
},
Volumes: []k8s.Volume{
{
Name: "config",
VolumeSource: k8s.VolumeSource{
ConfigMap: &k8s.ConfigMapVolumeSource{
LocalObjectReference: k8s.LocalObjectReference{
Name: "activation-config",
},
},
},
},
{
Name: "kubeadm",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{
Path: "/etc/kubernetes",
},
},
},
},
},
},
},
},
ServiceAccount: k8s.ServiceAccount{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: meta.ObjectMeta{
Name: "activation-service",
Namespace: "kube-system",
},
},
Service: k8s.Service{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: meta.ObjectMeta{
Name: "activation-service",
Namespace: "kube-system",
},
Spec: k8s.ServiceSpec{
Type: k8s.ServiceTypeNodePort,
Ports: []k8s.ServicePort{
{
Name: "grpc",
Protocol: k8s.ProtocolTCP,
Port: constants.ActivationServicePort,
TargetPort: intstr.IntOrString{IntVal: constants.ActivationServicePort},
NodePort: constants.ActivationServiceNodePort,
},
},
Selector: map[string]string{
"k8s-app": "activation-service",
},
},
},
ConfigMap: k8s.ConfigMap{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: meta.ObjectMeta{
Name: "activation-config",
Namespace: "kube-system",
},
Data: map[string]string{
"measurements": measurementsJSON,
"id": idJSON,
},
},
}
}
// Marshal marshals the activation daemonset using the Kubernetes resource marshaler.
func (a *activationDaemonset) Marshal() ([]byte, error) {
return MarshalK8SResources(a)
}
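
A similar sketch for the activation service, again assuming the same package; the CSP name and the two JSON blobs are placeholders for values produced during cluster creation:

// exampleActivationManifests is a hypothetical helper: it renders the
// activation-service resources. The measurements and ID are normally JSON
// documents computed elsewhere; empty JSON objects stand in for them here.
func exampleActivationManifests() ([]byte, error) {
    activation := NewActivationDaemonset("gcp", "{}", "{}")
    return activation.Marshal()
}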


@@ -0,0 +1,18 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewActivationDaemonset(t *testing.T) {
deployment := NewActivationDaemonset("csp", "measurementsJSON", "idJSON")
deploymentYAML, err := deployment.Marshal()
require.NoError(t, err)
var recreated activationDaemonset
require.NoError(t, UnmarshalK8SResources(deploymentYAML, &recreated))
assert.Equal(t, deployment, &recreated)
}


@@ -0,0 +1,33 @@
package resources
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
)
// AuditPolicy defines rulesets for what should be logged in the kube-apiserver audit log.
// Reference: https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/
type AuditPolicy struct {
Policy auditv1.Policy
}
func NewDefaultAuditPolicy() *AuditPolicy {
return &AuditPolicy{
Policy: auditv1.Policy{
TypeMeta: v1.TypeMeta{
APIVersion: "audit.k8s.io/v1",
Kind: "Policy",
},
Rules: []auditv1.PolicyRule{
{
Level: auditv1.LevelMetadata,
},
},
},
}
}
// Marshal marshals the audit policy as a YAML document.
func (p *AuditPolicy) Marshal() ([]byte, error) {
return MarshalK8SResources(p)
}
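
With the single Metadata-level rule above, the kube-apiserver logs request metadata (user, verb, resource, timestamp) but no request or response bodies. A minimal sketch of emitting the policy, assuming the same package:

// exampleAuditPolicyYAML is a hypothetical helper that renders the default
// audit policy as a single YAML document.
func exampleAuditPolicyYAML() ([]byte, error) {
    return NewDefaultAuditPolicy().Marshal()
}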


@@ -0,0 +1,21 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAuditPolicyMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
auditPolicy := NewDefaultAuditPolicy()
data, err := auditPolicy.Marshal()
require.NoError(err)
var recreated AuditPolicy
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(auditPolicy, &recreated)
}


@@ -0,0 +1,172 @@
package resources
import (
"fmt"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type cloudControllerManagerDeployment struct {
ServiceAccount k8s.ServiceAccount
ClusterRoleBinding rbac.ClusterRoleBinding
DaemonSet apps.DaemonSet
}
// references:
// https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/admin/cloud/ccm-example.yaml
// https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager
// NewDefaultCloudControllerManagerDeployment creates a new *cloudControllerManagerDeployment, customized for the CSP.
func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path, podCIDR string, extraArgs []string, extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar) *cloudControllerManagerDeployment {
command := []string{
path,
fmt.Sprintf("--cloud-provider=%s", cloudProvider),
"--leader-elect=true",
fmt.Sprintf("--cluster-cidr=%s", podCIDR),
"-v=2",
}
command = append(command, extraArgs...)
volumes := []k8s.Volume{
{
Name: "etckubernetes",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{Path: "/etc/kubernetes"},
},
},
{
Name: "etcssl",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{Path: "/etc/ssl"},
},
},
{
Name: "etcpki",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{Path: "/etc/pki"},
},
},
}
volumes = append(volumes, extraVolumes...)
volumeMounts := []k8s.VolumeMount{
{
MountPath: "/etc/kubernetes",
Name: "etckubernetes",
ReadOnly: true,
},
{
MountPath: "/etc/ssl",
Name: "etcssl",
ReadOnly: true,
},
{
MountPath: "/etc/pki",
Name: "etcpki",
ReadOnly: true,
},
}
volumeMounts = append(volumeMounts, extraVolumeMounts...)
return &cloudControllerManagerDeployment{
ServiceAccount: k8s.ServiceAccount{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: meta.ObjectMeta{
Name: "cloud-controller-manager",
Namespace: "kube-system",
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: meta.ObjectMeta{
Name: "system:cloud-controller-manager",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "cluster-admin",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "cloud-controller-manager",
Namespace: "kube-system",
},
},
},
DaemonSet: apps.DaemonSet{
TypeMeta: meta.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "cloud-controller-manager",
},
Name: "cloud-controller-manager",
Namespace: "kube-system",
},
Spec: apps.DaemonSetSpec{
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "cloud-controller-manager",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "cloud-controller-manager",
},
},
Spec: k8s.PodSpec{
ServiceAccountName: "cloud-controller-manager",
Containers: []k8s.Container{
{
Name: "cloud-controller-manager",
Image: image,
Command: command,
VolumeMounts: volumeMounts,
Env: env,
},
},
Volumes: volumes,
Tolerations: []k8s.Toleration{
{
Key: "node.cloudprovider.kubernetes.io/uninitialized",
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/master",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node.kubernetes.io/not-ready",
Effect: k8s.TaintEffectNoSchedule,
},
},
NodeSelector: map[string]string{
"node-role.kubernetes.io/master": "",
},
},
},
},
},
}
}
func (c *cloudControllerManagerDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(c)
}
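
A hypothetical invocation for a made-up provider, showing where the CSP-specific pieces plug in; every concrete value below (image, binary path, CIDR, extra flag) is a placeholder, not something this commit defines:

// exampleCCMDaemonSet is a hypothetical helper: it assembles the
// cloud-controller-manager resources for a fictional provider.
func exampleCCMDaemonSet() ([]byte, error) {
    ccm := NewDefaultCloudControllerManagerDeployment(
        "someprovider", // passed to --cloud-provider
        "registry.example.com/ccm:v0.0.1",          // container image (placeholder)
        "/usr/local/bin/cloud-controller-manager",  // binary path inside the image (placeholder)
        "10.244.0.0/16",                            // pod CIDR for --cluster-cidr
        []string{"--configure-cloud-routes=false"}, // provider-specific extra flags (example)
        nil, // no extra volumes
        nil, // no extra volume mounts
        nil, // no extra environment variables
    )
    return ccm.Marshal()
}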


@@ -0,0 +1,22 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
k8s "k8s.io/api/core/v1"
)
func TestCloudControllerMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
cloudControllerManagerDepl := NewDefaultCloudControllerManagerDeployment("dummy-cloudprovider", "some-image:latest", "/dummy_path", "192.0.2.0/24", []string{}, []k8s.Volume{}, []k8s.VolumeMount{}, nil)
data, err := cloudControllerManagerDepl.Marshal()
require.NoError(err)
var recreated cloudControllerManagerDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(cloudControllerManagerDepl, &recreated)
}


@@ -0,0 +1,180 @@
package resources
import (
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/resource"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type cloudNodeManagerDeployment struct {
ServiceAccount k8s.ServiceAccount
ClusterRole rbac.ClusterRole
ClusterRoleBinding rbac.ClusterRoleBinding
DaemonSet apps.DaemonSet
}
// NewDefaultCloudNodeManagerDeployment creates a new *cloudNodeManagerDeployment, customized for the CSP.
func NewDefaultCloudNodeManagerDeployment(image, path string, extraArgs []string) *cloudNodeManagerDeployment {
command := []string{
path,
"--node-name=$(NODE_NAME)",
}
command = append(command, extraArgs...)
return &cloudNodeManagerDeployment{
ServiceAccount: k8s.ServiceAccount{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: meta.ObjectMeta{
Name: "cloud-node-manager",
Namespace: "kube-system",
Labels: map[string]string{
"k8s-app": "cloud-node-manager",
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
},
ClusterRole: rbac.ClusterRole{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: meta.ObjectMeta{
Name: "cloud-node-manager",
Labels: map[string]string{
"k8s-app": "cloud-node-manager",
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"watch", "list", "get", "update", "patch"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes/status"},
Verbs: []string{"patch"},
},
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: meta.ObjectMeta{
Name: "cloud-node-manager",
Labels: map[string]string{
"k8s-app": "cloud-node-manager",
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "cloud-node-manager",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "cloud-node-manager",
Namespace: "kube-system",
},
},
},
DaemonSet: apps.DaemonSet{
TypeMeta: meta.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: meta.ObjectMeta{
Name: "cloud-node-manager",
Namespace: "kube-system",
Labels: map[string]string{
"component": "cloud-node-manager",
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
Spec: apps.DaemonSetSpec{
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{"k8s-app": "cloud-node-manager"},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{"k8s-app": "cloud-node-manager"},
Annotations: map[string]string{"cluster-autoscaler.kubernetes.io/daemonset-pod": "true"},
},
Spec: k8s.PodSpec{
PriorityClassName: "system-node-critical",
ServiceAccountName: "cloud-node-manager",
HostNetwork: true,
NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
Tolerations: []k8s.Toleration{
{
Key: "CriticalAddonsOnly",
Operator: k8s.TolerationOpExists,
},
{
Key: "node-role.kubernetes.io/master",
Operator: k8s.TolerationOpEqual,
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoExecute,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
},
Containers: []k8s.Container{
{
Name: "cloud-node-manager",
Image: image,
ImagePullPolicy: k8s.PullIfNotPresent,
Command: command,
Env: []k8s.EnvVar{
{
Name: "NODE_NAME",
ValueFrom: &k8s.EnvVarSource{
FieldRef: &k8s.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
Resources: k8s.ResourceRequirements{
Requests: k8s.ResourceList{
k8s.ResourceCPU: resource.MustParse("50m"),
k8s.ResourceMemory: resource.MustParse("50Mi"),
},
},
},
},
},
},
},
},
}
}
// Marshal marshals the cloud-node-manager deployment as YAML documents.
func (c *cloudNodeManagerDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(c)
}


@@ -0,0 +1,21 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestCloudNodeManagerMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
cloudNodeManagerDepl := NewDefaultCloudNodeManagerDeployment("image", "path", []string{})
data, err := cloudNodeManagerDepl.Marshal()
require.NoError(err)
var recreated cloudNodeManagerDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(cloudNodeManagerDepl, &recreated)
}


@@ -0,0 +1,505 @@
package resources
import (
"google.golang.org/protobuf/proto"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
rbac "k8s.io/api/rbac/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
type autoscalerDeployment struct {
PodDisruptionBudget policy.PodDisruptionBudget
ServiceAccount k8s.ServiceAccount
ClusterRole rbac.ClusterRole
ClusterRoleBinding rbac.ClusterRoleBinding
Role rbac.Role
RoleBinding rbac.RoleBinding
Service k8s.Service
Deployment apps.Deployment
}
// NewDefaultAutoscalerDeployment creates a new *autoscalerDeployment, customized for the CSP.
func NewDefaultAutoscalerDeployment(extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar) *autoscalerDeployment {
return &autoscalerDeployment{
PodDisruptionBudget: policy.PodDisruptionBudget{
TypeMeta: v1.TypeMeta{
APIVersion: "policy/v1",
Kind: "PodDisruptionBudget",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "default",
},
Spec: policy.PodDisruptionBudgetSpec{
Selector: &v1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
},
},
MaxUnavailable: &intstr.IntOrString{
Type: intstr.Int,
IntVal: 1,
},
},
},
ServiceAccount: k8s.ServiceAccount{
TypeMeta: v1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
AutomountServiceAccountToken: proto.Bool(true),
},
ClusterRole: rbac.ClusterRole{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{
"events",
"endpoints",
},
Verbs: []string{
"create",
"patch",
},
},
{
APIGroups: []string{""},
Resources: []string{
"pods/eviction",
},
Verbs: []string{
"create",
},
},
{
APIGroups: []string{""},
Resources: []string{
"pods/status",
},
Verbs: []string{
"update",
},
},
{
APIGroups: []string{""},
Resources: []string{
"endpoints",
},
ResourceNames: []string{
"cluster-autoscaler",
},
Verbs: []string{
"get",
"update",
},
},
{
APIGroups: []string{""},
Resources: []string{
"nodes",
},
Verbs: []string{
"watch",
"list",
"get",
"update",
},
},
{
APIGroups: []string{""},
Resources: []string{
"namespaces",
"pods",
"services",
"replicationcontrollers",
"persistentvolumeclaims",
"persistentvolumes",
},
Verbs: []string{
"watch",
"list",
"get",
},
},
{
APIGroups: []string{
"batch",
},
Resources: []string{
"jobs",
"cronjobs",
},
Verbs: []string{
"watch",
"list",
"get",
},
},
{
APIGroups: []string{
"batch",
"extensions",
},
Resources: []string{
"jobs",
},
Verbs: []string{
"get",
"list",
"patch",
"watch",
},
},
{
APIGroups: []string{
"extensions",
},
Resources: []string{
"replicasets",
"daemonsets",
},
Verbs: []string{
"watch",
"list",
"get",
},
},
{
APIGroups: []string{
"policy",
},
Resources: []string{
"poddisruptionbudgets",
},
Verbs: []string{
"watch",
"list",
},
},
{
APIGroups: []string{
"apps",
},
Resources: []string{
"daemonsets",
"replicasets",
"statefulsets",
},
Verbs: []string{
"watch",
"list",
"get",
},
},
{
APIGroups: []string{
"storage.k8s.io",
},
Resources: []string{
"storageclasses",
"csinodes",
"csidrivers",
"csistoragecapacities",
},
Verbs: []string{
"watch",
"list",
"get",
},
},
{
APIGroups: []string{""},
Resources: []string{
"configmaps",
},
Verbs: []string{
"list",
"watch",
},
},
{
APIGroups: []string{
"coordination.k8s.io",
},
Resources: []string{
"leases",
},
Verbs: []string{
"create",
},
},
{
APIGroups: []string{
"coordination.k8s.io",
},
ResourceNames: []string{
"cluster-autoscaler",
},
Resources: []string{
"leases",
},
Verbs: []string{
"get",
"update",
},
},
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "constellation-cluster-autoscaler",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
},
},
Role: rbac.Role{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "Role",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{
"configmaps",
},
Verbs: []string{
"create",
},
},
{
APIGroups: []string{""},
Resources: []string{
"configmaps",
},
ResourceNames: []string{
"cluster-autoscaler-status",
},
Verbs: []string{
"delete",
"get",
"update",
},
},
},
},
RoleBinding: rbac.RoleBinding{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "RoleBinding",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: "constellation-cluster-autoscaler",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
},
},
Service: k8s.Service{
TypeMeta: v1.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "default",
},
Spec: k8s.ServiceSpec{
Ports: []k8s.ServicePort{
{
Port: 8085,
Protocol: k8s.ProtocolTCP,
TargetPort: intstr.FromInt(8085),
Name: "http",
},
},
Selector: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
},
Type: k8s.ServiceTypeClusterIP,
},
},
Deployment: apps.Deployment{
TypeMeta: v1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-cluster-autoscaler",
Namespace: "kube-system",
},
Spec: apps.DeploymentSpec{
Replicas: proto.Int32(1),
Selector: &v1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "cluster-autoscaler",
},
},
Spec: k8s.PodSpec{
PriorityClassName: "system-cluster-critical",
DNSPolicy: k8s.DNSClusterFirst,
Containers: []k8s.Container{
{
Name: "cluster-autoscaler",
Image: clusterAutoscalerImage,
ImagePullPolicy: k8s.PullIfNotPresent,
LivenessProbe: &k8s.Probe{
ProbeHandler: k8s.ProbeHandler{
HTTPGet: &k8s.HTTPGetAction{
Path: "/health-check",
Port: intstr.FromInt(8085),
},
},
},
Ports: []k8s.ContainerPort{
{
ContainerPort: 8085,
},
},
VolumeMounts: extraVolumeMounts,
Env: env,
},
},
Volumes: extraVolumes,
ServiceAccountName: "constellation-cluster-autoscaler",
Tolerations: []k8s.Toleration{
{
Key: "node-role.kubernetes.io/master",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node.cloudprovider.kubernetes.io/uninitialized",
Operator: k8s.TolerationOpEqual,
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
},
NodeSelector: map[string]string{
"node-role.kubernetes.io/control-plane": "",
},
},
},
},
},
}
}
func (a *autoscalerDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(a)
}
func (a *autoscalerDeployment) SetAutoscalerCommand(cloudprovider string, autoscalingNodeGroups []string) {
command := []string{
"./cluster-autoscaler",
"--cloud-provider",
cloudprovider,
"--logtostderr=true",
"--stderrthreshold=info",
"--v=2",
"--namespace=kube-system",
}
for _, autoscalingNodeGroup := range autoscalingNodeGroups {
command = append(command, "--nodes", autoscalingNodeGroup)
}
a.Deployment.Spec.Template.Spec.Containers[0].Command = command
}
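
A short sketch of how the command is assembled; the provider string is passed through to --cloud-provider, and each node group becomes an extra --nodes argument. The min:max:name group format shown here follows the upstream cluster-autoscaler convention, and the group names are placeholders:

// exampleAutoscalerYAML is a hypothetical helper: it renders the autoscaler
// resources and points the autoscaler at two placeholder scaling groups.
func exampleAutoscalerYAML() ([]byte, error) {
    autoscaler := NewDefaultAutoscalerDeployment(nil, nil, nil)
    autoscaler.SetAutoscalerCommand("someprovider", []string{
        "1:10:worker-group-a",
        "1:10:worker-group-b",
    })
    return autoscaler.Marshal()
}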


@@ -0,0 +1,41 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAutoscalerDeploymentMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
autoscalerDepl := NewDefaultAutoscalerDeployment(nil, nil, nil)
data, err := autoscalerDepl.Marshal()
require.NoError(err)
t.Log(string(data))
var recreated autoscalerDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(autoscalerDepl, &recreated)
}
func TestAutoscalerDeploymentWithCommandMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
autoscalerDepl := NewDefaultAutoscalerDeployment(nil, nil, nil)
autoscalerDepl.SetAutoscalerCommand("someProvider", []string{"group1", "group2"})
data, err := autoscalerDepl.Marshal()
require.NoError(err)
t.Log(string(data))
var recreated autoscalerDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(autoscalerDepl, &recreated)
}


@@ -0,0 +1,18 @@
package resources
import (
k8s "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// ConfigMaps represents a list of k8s ConfigMaps.
type ConfigMaps []*k8s.ConfigMap
// Marshal marshals config maps into multiple YAML documents.
func (s ConfigMaps) Marshal() ([]byte, error) {
objects := make([]runtime.Object, len(s))
for i := range s {
objects[i] = s[i]
}
return MarshalK8SResourcesList(objects)
}


@@ -0,0 +1,49 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
k8s "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestConfigMaps(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
configMaps := ConfigMaps{
&k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{"key": "value1"},
},
&k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{"key": "value2"},
},
}
data, err := configMaps.Marshal()
require.NoError(err)
assert.Equal(`apiVersion: v1
data:
key: value1
kind: ConfigMap
metadata:
creationTimestamp: null
---
apiVersion: v1
data:
key: value2
kind: ConfigMap
metadata:
creationTimestamp: null
`, string(data))
}


@@ -0,0 +1,32 @@
package resources
import (
"encoding/base64"
"fmt"
"github.com/edgelesssys/constellation/internal/secrets"
k8s "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NewImagePullSecret creates a new k8s.Secret from the config for authenticating when pulling images.
func NewImagePullSecret() k8s.Secret {
base64EncodedSecret := base64.StdEncoding.EncodeToString(
[]byte(fmt.Sprintf("%s:%s", secrets.PullSecretUser, secrets.PullSecretToken)),
)
pullSecretDockerCfgJson := fmt.Sprintf(`{"auths":{"ghcr.io":{"auth":"%s"}}}`, base64EncodedSecret)
return k8s.Secret{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: meta.ObjectMeta{
Name: secrets.PullSecretName,
Namespace: "kube-system",
},
StringData: map[string]string{".dockerconfigjson": pullSecretDockerCfgJson},
Type: "kubernetes.io/dockerconfigjson",
}
}
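
The secret's only key, .dockerconfigjson, holds a standard docker config JSON whose auth field is the base64 encoding of "user:token". A small same-package sketch for inspecting it:

// exampleInspectPullSecret is a hypothetical helper: it returns the raw
// docker config JSON, which has the shape
// {"auths":{"ghcr.io":{"auth":"<base64 of user:token>"}}}.
func exampleInspectPullSecret() string {
    return NewImagePullSecret().StringData[".dockerconfigjson"]
}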


@@ -0,0 +1,13 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestImagePullSecret(t *testing.T) {
imgPullSec := NewImagePullSecret()
_, err := imgPullSec.Marshal()
assert.NoError(t, err)
}


@@ -0,0 +1,12 @@
package resources
const (
// Constellation images.
activationImage = "ghcr.io/edgelesssys/constellation/activation-service:v1.2"
accessManagerImage = "ghcr.io/edgelesssys/constellation/access-manager:v1.2"
kmsImage = "ghcr.io/edgelesssys/constellation/kmsserver:v1.2"
verificationImage = "ghcr.io/edgelesssys/constellation/verification-service:v1.2"
// external images.
clusterAutoscalerImage = "k8s.gcr.io/autoscaling/cluster-autoscaler:v1.23.0"
)


@@ -0,0 +1,268 @@
package resources
import (
"fmt"
"github.com/edgelesssys/constellation/internal/constants"
"github.com/edgelesssys/constellation/internal/secrets"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
type kmsDeployment struct {
ServiceAccount k8s.ServiceAccount
ServiceInternal k8s.Service
ServiceExternal k8s.Service
ClusterRole rbac.ClusterRole
ClusterRoleBinding rbac.ClusterRoleBinding
Deployment apps.Deployment
MasterSecret k8s.Secret
ImagePullSecret k8s.Secret
}
// NewKMSDeployment creates a new *kmsDeployment to use as the key management system inside Constellation.
func NewKMSDeployment(csp string, masterSecret []byte) *kmsDeployment {
return &kmsDeployment{
ServiceAccount: k8s.ServiceAccount{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: meta.ObjectMeta{
Name: "kms",
Namespace: "kube-system",
},
},
ServiceInternal: k8s.Service{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: meta.ObjectMeta{
Name: "kms",
Namespace: "kube-system",
},
Spec: k8s.ServiceSpec{
Type: k8s.ServiceTypeClusterIP,
Ports: []k8s.ServicePort{
{
Name: "grpc",
Protocol: k8s.ProtocolTCP,
Port: constants.KMSPort,
TargetPort: intstr.FromInt(constants.KMSPort),
},
},
Selector: map[string]string{
"k8s-app": "kms",
},
},
},
ServiceExternal: k8s.Service{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: meta.ObjectMeta{
Name: "kms-external",
Namespace: "kube-system",
},
Spec: k8s.ServiceSpec{
Type: k8s.ServiceTypeNodePort,
Ports: []k8s.ServicePort{
{
Name: "atls",
Protocol: k8s.ProtocolTCP,
Port: constants.KMSATLSPort,
TargetPort: intstr.FromInt(constants.KMSATLSPort),
NodePort: constants.KMSNodePort,
},
},
Selector: map[string]string{
"k8s-app": "kms",
},
},
},
ClusterRole: rbac.ClusterRole{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: meta.ObjectMeta{
Name: "kms",
Labels: map[string]string{
"k8s-app": "kms",
},
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"secrets"},
Verbs: []string{"get"},
},
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: meta.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: meta.ObjectMeta{
Name: "kms",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "kms",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "kms",
Namespace: "kube-system",
},
},
},
Deployment: apps.Deployment{
TypeMeta: meta.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "kms",
},
Name: "kms",
Namespace: "kube-system",
},
Spec: apps.DeploymentSpec{
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "kms",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "kms",
},
},
Spec: k8s.PodSpec{
PriorityClassName: "system-cluster-critical",
Tolerations: []k8s.Toleration{
{
Key: "CriticalAddonsOnly",
Operator: k8s.TolerationOpExists,
},
{
Key: "node-role.kubernetes.io/master",
Operator: k8s.TolerationOpEqual,
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoExecute,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
},
// Only run on control plane nodes
NodeSelector: map[string]string{
"node-role.kubernetes.io/master": "",
},
ImagePullSecrets: []k8s.LocalObjectReference{
{
Name: secrets.PullSecretName,
},
},
Volumes: []k8s.Volume{
{
Name: "config",
VolumeSource: k8s.VolumeSource{
Projected: &k8s.ProjectedVolumeSource{
Sources: []k8s.VolumeProjection{
{
ConfigMap: &k8s.ConfigMapProjection{
LocalObjectReference: k8s.LocalObjectReference{
Name: "activation-config",
},
Items: []k8s.KeyToPath{
{
Key: constants.MeasurementsFilename,
Path: constants.MeasurementsFilename,
},
},
},
},
{
Secret: &k8s.SecretProjection{
LocalObjectReference: k8s.LocalObjectReference{
Name: constants.ConstellationMasterSecretStoreName,
},
Items: []k8s.KeyToPath{
{
Key: constants.ConstellationMasterSecretKey,
Path: constants.MasterSecretFilename,
},
},
},
},
},
},
},
},
},
ServiceAccountName: "kms",
Containers: []k8s.Container{
{
Name: "kms",
Image: kmsImage,
Args: []string{
fmt.Sprintf("--atls-port=%d", constants.KMSATLSPort),
fmt.Sprintf("--port=%d", constants.KMSPort),
fmt.Sprintf("--cloud-provider=%s", csp),
},
VolumeMounts: []k8s.VolumeMount{
{
Name: "config",
ReadOnly: true,
MountPath: constants.ServiceBasePath,
},
},
},
},
},
},
},
},
MasterSecret: k8s.Secret{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: meta.ObjectMeta{
Name: constants.ConstellationMasterSecretStoreName,
Namespace: "kube-system",
},
Data: map[string][]byte{
constants.ConstellationMasterSecretKey: masterSecret,
},
Type: "Opaque",
},
ImagePullSecret: NewImagePullSecret(),
}
}
func (c *kmsDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(c)
}
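
A usage sketch with placeholder inputs; the master secret is normally generated during cluster creation, and the CSP string is passed straight through to the --cloud-provider flag:

// exampleKMSManifests is a hypothetical helper: it renders the KMS resources
// with a dummy master secret.
func exampleKMSManifests() ([]byte, error) {
    masterSecret := make([]byte, 32) // 32 zero bytes stand in for a real secret
    return NewKMSDeployment("someprovider", masterSecret).Marshal()
}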


@@ -0,0 +1,22 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestKMSMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
testMS := []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}
kmsDepl := NewKMSDeployment("test", testMS)
data, err := kmsDepl.Marshal()
require.NoError(err)
var recreated kmsDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(kmsDepl, &recreated)
}


@@ -0,0 +1,149 @@
package resources
import (
"bytes"
"errors"
"fmt"
"io"
"reflect"
"gopkg.in/yaml.v3"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/client-go/kubernetes/scheme"
)
// Marshaler is used by all k8s resources that can be marshaled to YAML.
type Marshaler interface {
Marshal() ([]byte, error)
}
// MarshalK8SResources marshals every field of a struct into a k8s resource YAML.
func MarshalK8SResources(resources any) ([]byte, error) {
if resources == nil {
return nil, errors.New("marshal on nil called")
}
serializer := json.NewYAMLSerializer(json.DefaultMetaFactory, nil, nil)
var buf bytes.Buffer
// reflect over struct containing fields that are k8s resources
value := reflect.ValueOf(resources)
if value.Kind() != reflect.Ptr && value.Kind() != reflect.Interface {
return nil, errors.New("marshal on non-pointer called")
}
elem := value.Elem()
if elem.Kind() == reflect.Struct {
// iterate over all struct fields
for i := 0; i < elem.NumField(); i++ {
field := elem.Field(i)
var inter any
// check if value can be converted to interface
if field.CanInterface() {
inter = field.Addr().Interface()
} else {
continue
}
// convert field interface to runtime.Object
obj, ok := inter.(runtime.Object)
if !ok {
continue
}
if i > 0 {
// separate YAML documents
buf.Write([]byte("---\n"))
}
// serialize k8s resource
if err := serializer.Encode(obj, &buf); err != nil {
return nil, err
}
}
}
return buf.Bytes(), nil
}
// UnmarshalK8SResources takes YAML and converts it into a k8s resources struct.
func UnmarshalK8SResources(data []byte, into any) error {
if into == nil {
return errors.New("unmarshal on nil called")
}
// reflect over struct containing fields that are k8s resources
value := reflect.ValueOf(into).Elem()
if value.Kind() != reflect.Struct {
return errors.New("can only reflect over struct")
}
decoder := serializer.NewCodecFactory(scheme.Scheme).UniversalDecoder()
documents, err := splitYAML(data)
if err != nil {
return fmt.Errorf("splitting deployment YAML into multiple documents: %w", err)
}
if len(documents) != value.NumField() {
return fmt.Errorf("expected %v YAML documents, got %v", value.NumField(), len(documents))
}
for i := 0; i < value.NumField(); i++ {
field := value.Field(i)
var inter any
// check if value can be converted to interface
if !field.CanInterface() {
return fmt.Errorf("cannot use struct field %v as interface", i)
}
inter = field.Addr().Interface()
// convert field interface to runtime.Object
obj, ok := inter.(runtime.Object)
if !ok {
return fmt.Errorf("cannot convert struct field %v as k8s runtime object", i)
}
// decode YAML document into struct field
if err := runtime.DecodeInto(decoder, documents[i], obj); err != nil {
return err
}
}
return nil
}
// MarshalK8SResourcesList marshals every element of a slice into a k8s resource YAML.
func MarshalK8SResourcesList(resources []runtime.Object) ([]byte, error) {
serializer := json.NewYAMLSerializer(json.DefaultMetaFactory, nil, nil)
var buf bytes.Buffer
for i, obj := range resources {
if i > 0 {
// separate YAML documents
buf.Write([]byte("---\n"))
}
// serialize k8s resource
if err := serializer.Encode(obj, &buf); err != nil {
return nil, err
}
}
return buf.Bytes(), nil
}
// splitYAML splits a YAML multidoc into a slice of multiple YAML docs.
func splitYAML(resources []byte) ([][]byte, error) {
dec := yaml.NewDecoder(bytes.NewReader(resources))
var res [][]byte
for {
var value any
err := dec.Decode(&value)
if errors.Is(err, io.EOF) {
break
}
if err != nil {
return nil, err
}
valueBytes, err := yaml.Marshal(value)
if err != nil {
return nil, err
}
res = append(res, valueBytes)
}
return res, nil
}
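
To make the reflection-based contract concrete, here is a round-trip sketch: any struct whose exported fields are Kubernetes API objects can be marshaled into multi-document YAML and decoded back, provided the number of documents matches the number of fields. It assumes core/v1 and meta/v1 are imported as k8s and meta, as in the surrounding files:

// exampleRoundTrip is a hypothetical helper demonstrating that
// MarshalK8SResources and UnmarshalK8SResources are symmetric.
func exampleRoundTrip() error {
    type bundle struct {
        ConfigMap k8s.ConfigMap
        Secret    k8s.Secret
    }
    in := &bundle{
        ConfigMap: k8s.ConfigMap{
            TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
            Data:     map[string]string{"key": "value"},
        },
        Secret: k8s.Secret{
            TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "Secret"},
            Data:     map[string][]byte{"key": []byte("value")},
        },
    }
    data, err := MarshalK8SResources(in) // two YAML documents separated by "---"
    if err != nil {
        return err
    }
    out := &bundle{}
    return UnmarshalK8SResources(data, out) // out now mirrors in
}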


@@ -0,0 +1,360 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
k8s "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
func TestMarshalK8SResources(t *testing.T) {
testCases := map[string]struct {
resources any
wantErr bool
wantYAML string
}{
"ConfigMap as only field can be marshaled": {
resources: &struct {
ConfigMap k8s.ConfigMap
}{
ConfigMap: k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{
"key": "value",
},
},
},
wantYAML: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
`,
},
"Multiple fields are correctly encoded": {
resources: &struct {
ConfigMap k8s.ConfigMap
Secret k8s.Secret
}{
ConfigMap: k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{
"key": "value",
},
},
Secret: k8s.Secret{
TypeMeta: v1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
Data: map[string][]byte{
"key": []byte("value"),
},
},
},
wantYAML: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
---
apiVersion: v1
data:
key: dmFsdWU=
kind: Secret
metadata:
creationTimestamp: null
`,
},
"Non-pointer is detected": {
resources: "non-pointer",
wantErr: true,
},
"Nil resource pointer is detected": {
resources: nil,
wantErr: true,
},
"Non-pointer field is ignored": {
resources: &struct{ String string }{String: "somestring"},
},
"nil field is ignored": {
resources: &struct {
ConfigMap *k8s.ConfigMap
}{
ConfigMap: nil,
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
yaml, err := MarshalK8SResources(tc.resources)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
assert.Equal(tc.wantYAML, string(yaml))
})
}
}
func TestUnmarshalK8SResources(t *testing.T) {
testCases := map[string]struct {
data string
into any
wantObj any
wantErr bool
}{
"ConfigMap as only field can be unmarshaled": {
data: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
`,
into: &struct {
ConfigMap k8s.ConfigMap
}{},
wantObj: &struct {
ConfigMap k8s.ConfigMap
}{
ConfigMap: k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{
"key": "value",
},
},
},
},
"Multiple fields are correctly unmarshaled": {
data: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
---
apiVersion: v1
data:
key: dmFsdWU=
kind: Secret
metadata:
creationTimestamp: null
`,
into: &struct {
ConfigMap k8s.ConfigMap
Secret k8s.Secret
}{},
wantObj: &struct {
ConfigMap k8s.ConfigMap
Secret k8s.Secret
}{
ConfigMap: k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{
"key": "value",
},
},
Secret: k8s.Secret{
TypeMeta: v1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
Data: map[string][]byte{
"key": []byte("value"),
},
},
},
},
"Mismatching amount of fields is detected": {
data: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
---
apiVersion: v1
data:
key: dmFsdWU=
kind: Secret
metadata:
creationTimestamp: null
`,
into: &struct {
ConfigMap k8s.ConfigMap
}{},
wantErr: true,
},
"Non-struct pointer is detected": {
into: proto.String("test"),
wantErr: true,
},
"Nil into is detected": {
into: nil,
wantErr: true,
},
"Invalid yaml is detected": {
data: `duplicateKey: value
duplicateKey: value`,
into: &struct {
ConfigMap k8s.ConfigMap
}{},
wantErr: true,
},
"Struct field cannot interface with runtime.Object": {
data: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
`,
into: &struct {
String string
}{},
wantErr: true,
},
"Struct field mismatch": {
data: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
`,
into: &struct {
Secret k8s.Secret
}{},
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
err := UnmarshalK8SResources([]byte(tc.data), tc.into)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
assert.Equal(tc.wantObj, tc.into)
})
}
}
func TestMarshalK8SResourcesList(t *testing.T) {
testCases := map[string]struct {
resources []runtime.Object
wantErr bool
wantYAML string
}{
"ConfigMap as only element be marshaled": {
resources: []runtime.Object{
&k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{
"key": "value",
},
},
},
wantYAML: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
`,
},
"Multiple fields are correctly encoded": {
resources: []runtime.Object{
&k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
Data: map[string]string{
"key": "value",
},
},
&k8s.Secret{
TypeMeta: v1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
Data: map[string][]byte{
"key": []byte("value"),
},
},
},
wantYAML: `apiVersion: v1
data:
key: value
kind: ConfigMap
metadata:
creationTimestamp: null
---
apiVersion: v1
data:
key: dmFsdWU=
kind: Secret
metadata:
creationTimestamp: null
`,
},
"Nil resource pointer is encodes": {
resources: []runtime.Object{nil},
wantYAML: "null\n",
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
yaml, err := MarshalK8SResourcesList(tc.resources)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
assert.Equal(tc.wantYAML, string(yaml))
})
}
}


@@ -0,0 +1,18 @@
package resources
import (
k8s "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// Secrets represents a list of k8s Secrets.
type Secrets []*k8s.Secret
// Marshal marshals secrets into multiple YAML documents.
func (s Secrets) Marshal() ([]byte, error) {
objects := make([]runtime.Object, len(s))
for i := range s {
objects[i] = s[i]
}
return MarshalK8SResourcesList(objects)
}


@@ -0,0 +1,49 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
k8s "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestSecrets(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
secrets := Secrets{
&k8s.Secret{
TypeMeta: v1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
Data: map[string][]byte{"key": []byte("value1")},
},
&k8s.Secret{
TypeMeta: v1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
Data: map[string][]byte{"key": []byte("value2")},
},
}
data, err := secrets.Marshal()
require.NoError(err)
assert.Equal(`apiVersion: v1
data:
key: dmFsdWUx
kind: Secret
metadata:
creationTimestamp: null
---
apiVersion: v1
data:
key: dmFsdWUy
kind: Secret
metadata:
creationTimestamp: null
`, string(data))
}


@@ -0,0 +1,153 @@
package resources
import (
"fmt"
"github.com/edgelesssys/constellation/internal/constants"
"github.com/edgelesssys/constellation/internal/secrets"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
type verificationDaemonset struct {
DaemonSet apps.DaemonSet
Service k8s.Service
}
func NewVerificationDaemonSet(csp string) *verificationDaemonset {
return &verificationDaemonset{
DaemonSet: apps.DaemonSet{
TypeMeta: meta.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: meta.ObjectMeta{
Name: "verification-service",
Namespace: "kube-system",
Labels: map[string]string{
"k8s-app": "verification-service",
"component": "verification-service",
},
},
Spec: apps.DaemonSetSpec{
Selector: &meta.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "verification-service",
},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
"k8s-app": "verification-service",
},
},
Spec: k8s.PodSpec{
Tolerations: []k8s.Toleration{
{
Key: "node-role.kubernetes.io/master",
Operator: k8s.TolerationOpEqual,
Value: "true",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoExecute,
},
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
},
ImagePullSecrets: []k8s.LocalObjectReference{
{
Name: secrets.PullSecretName,
},
},
Containers: []k8s.Container{
{
Name: "verification-service",
Image: verificationImage,
Ports: []k8s.ContainerPort{
{
Name: "http",
ContainerPort: constants.VerifyServicePortHTTP,
},
{
Name: "grpc",
ContainerPort: constants.VerifyServicePortGRPC,
},
},
SecurityContext: &k8s.SecurityContext{
Privileged: func(b bool) *bool { return &b }(true),
},
Args: []string{
fmt.Sprintf("--cloud-provider=%s", csp),
},
VolumeMounts: []k8s.VolumeMount{
{
Name: "event-log",
ReadOnly: true,
MountPath: "/sys/kernel/security/",
},
},
},
},
Volumes: []k8s.Volume{
{
Name: "event-log",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{
Path: "/sys/kernel/security/",
},
},
},
},
},
},
},
},
Service: k8s.Service{
TypeMeta: meta.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: meta.ObjectMeta{
Name: "verification-service",
Namespace: "kube-system",
},
Spec: k8s.ServiceSpec{
Type: k8s.ServiceTypeNodePort,
Ports: []k8s.ServicePort{
{
Name: "http",
Protocol: k8s.ProtocolTCP,
Port: constants.VerifyServicePortHTTP,
TargetPort: intstr.FromInt(constants.VerifyServicePortHTTP),
NodePort: constants.VerifyServiceNodePortHTTP,
},
{
Name: "grpc",
Protocol: k8s.ProtocolTCP,
Port: constants.VerifyServicePortGRPC,
TargetPort: intstr.FromInt(constants.VerifyServicePortGRPC),
NodePort: constants.VerifyServiceNodePortGRPC,
},
},
Selector: map[string]string{
"k8s-app": "verification-service",
},
},
},
}
}
func (v *verificationDaemonset) Marshal() ([]byte, error) {
return MarshalK8SResources(v)
}


@@ -0,0 +1,18 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewVerificationDaemonset(t *testing.T) {
deployment := NewVerificationDaemonSet("csp")
deploymentYAML, err := deployment.Marshal()
require.NoError(t, err)
var recreated verificationDaemonset
require.NoError(t, UnmarshalK8SResources(deploymentYAML, &recreated))
assert.Equal(t, deployment, &recreated)
}