Mirror of https://github.com/edgelesssys/constellation.git

replace flannel with cilium

Parent: 7e1c898870
Commit: 791d5564ba
98 changed files with 3626 additions and 2156 deletions
@@ -104,7 +104,6 @@ func (c *CoreOSConfiguration) InitConfiguration(externalCloudProvider bool) Kube
"profiling": "false",
},
},
ControlPlaneEndpoint: "127.0.0.1:16443",
},
// warning: this config is applied to every node in the cluster!
KubeletConfiguration: kubeletconf.KubeletConfiguration{

@@ -123,6 +122,18 @@ func (c *CoreOSConfiguration) InitConfiguration(externalCloudProvider bool) Kube
APIVersion: kubeletconf.SchemeGroupVersion.String(),
Kind: "KubeletConfiguration",
},
RegisterWithTaints: []corev1.Taint{
{
Key: "node.cloudprovider.kubernetes.io/uninitialized",
Value: "true",
Effect: corev1.TaintEffectPreferNoSchedule,
},
{
Key: "node.cilium.io/agent-not-ready",
Value: "true",
Effect: corev1.TaintEffectPreferNoSchedule,
},
},
},
}
}

@@ -157,63 +168,6 @@ func (c *CoreOSConfiguration) JoinConfiguration(externalCloudProvider bool) Kube
}
}

type AWSConfiguration struct{}

func (a *AWSConfiguration) InitConfiguration() KubeadmInitYAML {
return KubeadmInitYAML{
InitConfiguration: kubeadm.InitConfiguration{
TypeMeta: metav1.TypeMeta{
APIVersion: kubeadm.SchemeGroupVersion.String(),
Kind: "InitConfiguration",
},
NodeRegistration: kubeadm.NodeRegistrationOptions{
CRISocket: "/run/containerd/containerd.sock",
IgnorePreflightErrors: []string{"SystemVerification"},
},
LocalAPIEndpoint: kubeadm.APIEndpoint{BindPort: bindPort},
},
ClusterConfiguration: kubeadm.ClusterConfiguration{
TypeMeta: metav1.TypeMeta{
APIVersion: kubeadm.SchemeGroupVersion.String(),
Kind: "ClusterConfiguration",
},
APIServer: kubeadm.APIServer{
CertSANs: []string{"10.118.0.1"},
},
},
KubeletConfiguration: kubeletconf.KubeletConfiguration{
TypeMeta: metav1.TypeMeta{
APIVersion: kubeletconf.SchemeGroupVersion.String(),
Kind: "KubeletConfiguration",
},
},
}
}

func (a *AWSConfiguration) JoinConfiguration() KubeadmJoinYAML {
return KubeadmJoinYAML{
JoinConfiguration: kubeadm.JoinConfiguration{
TypeMeta: metav1.TypeMeta{
APIVersion: kubeadm.SchemeGroupVersion.String(),
Kind: "JoinConfiguration",
},
NodeRegistration: kubeadm.NodeRegistrationOptions{
CRISocket: "/run/containerd/containerd.sock",
IgnorePreflightErrors: []string{"SystemVerification"},
},
Discovery: kubeadm.Discovery{
BootstrapToken: &kubeadm.BootstrapTokenDiscovery{},
},
},
KubeletConfiguration: kubeletconf.KubeletConfiguration{
TypeMeta: metav1.TypeMeta{
APIVersion: kubeletconf.SchemeGroupVersion.String(),
Kind: "KubeletConfiguration",
},
},
}
}

type KubeadmJoinYAML struct {
JoinConfiguration kubeadm.JoinConfiguration
KubeletConfiguration kubeletconf.KubeletConfiguration

@@ -276,10 +230,27 @@ func (k *KubeadmInitYAML) SetNodeName(nodeName string) {
k.InitConfiguration.NodeRegistration.Name = nodeName
}

// SetCertSANs sets the SANs for the certificate.
func (k *KubeadmInitYAML) SetCertSANs(certSANs []string) {
for _, certSAN := range certSANs {
if certSAN == "" {
continue
}
k.ClusterConfiguration.APIServer.CertSANs = append(k.ClusterConfiguration.APIServer.CertSANs, certSAN)
}
}

func (k *KubeadmInitYAML) SetApiServerAdvertiseAddress(apiServerAdvertiseAddress string) {
k.InitConfiguration.LocalAPIEndpoint.AdvertiseAddress = apiServerAdvertiseAddress
}

// SetControlPlaneEndpoint sets the control plane endpoint if controlPlaneEndpoint is not empty.
func (k *KubeadmInitYAML) SetControlPlaneEndpoint(controlPlaneEndpoint string) {
if controlPlaneEndpoint != "" {
k.ClusterConfiguration.ControlPlaneEndpoint = controlPlaneEndpoint
}
}

func (k *KubeadmInitYAML) SetServiceCIDR(serviceCIDR string) {
k.ClusterConfiguration.Networking.ServiceSubnet = serviceCIDR
}

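For orientation, here is a minimal sketch of how the init YAML might be assembled with the setters shown above. The setter names and semantics are taken from this diff and its tests further down; the wrapping function and the concrete values are assumptions.

// Sketch only: assumes the CoreOSConfiguration type and KubeadmInitYAML setters from this diff.
func buildExampleInitConfig() KubeadmInitYAML {
	coreOSConfig := CoreOSConfiguration{}
	k := coreOSConfig.InitConfiguration(true) // true: external cloud provider
	k.SetApiServerAdvertiseAddress("192.0.2.1")    // assumed address
	k.SetNodeName("control-plane-0")               // assumed node name
	k.SetCertSANs([]string{"203.0.113.10", ""})    // empty SANs are skipped by SetCertSANs
	k.SetControlPlaneEndpoint("203.0.113.10:6443") // only applied because it is non-empty
	k.SetServiceCIDR("10.96.0.0/12")               // assumed service CIDR
	return k
}
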
@@ -13,27 +13,11 @@ func TestMain(m *testing.M) {
}

func TestInitConfiguration(t *testing.T) {
awsConfig := AWSConfiguration{}
coreOSConfig := CoreOSConfiguration{}

testCases := map[string]struct {
config KubeadmInitYAML
}{
"AWS init config can be created": {
config: awsConfig.InitConfiguration(),
},
"AWS init config with all fields can be created": {
config: func() KubeadmInitYAML {
c := awsConfig.InitConfiguration()
c.SetApiServerAdvertiseAddress("192.0.2.0")
c.SetNodeIP("192.0.2.0")
c.SetNodeName("node")
c.SetPodNetworkCIDR("10.244.0.0/16")
c.SetServiceCIDR("10.245.0.0/24")
c.SetProviderID("somecloudprovider://instance-id")
return c
}(),
},
"CoreOS init config can be created": {
config: coreOSConfig.InitConfiguration(true),
},

@@ -67,27 +51,11 @@ func TestInitConfiguration(t *testing.T) {
}

func TestJoinConfiguration(t *testing.T) {
awsConfig := AWSConfiguration{}
coreOSConfig := CoreOSConfiguration{}

testCases := map[string]struct {
config KubeadmJoinYAML
}{
"AWS join config can be created": {
config: awsConfig.JoinConfiguration(),
},
"AWS join config with all fields can be created": {
config: func() KubeadmJoinYAML {
c := awsConfig.JoinConfiguration()
c.SetApiServerEndpoint("192.0.2.0:6443")
c.SetNodeIP("192.0.2.0")
c.SetNodeName("node")
c.SetToken("token")
c.AppendDiscoveryTokenCaCertHash("discovery-token-ca-cert-hash")
c.SetProviderID("somecloudprovider://instance-id")
return c
}(),
},
"CoreOS join config can be created": {
config: coreOSConfig.JoinConfiguration(true),
},

@@ -239,18 +239,13 @@ func TestGetObjects(t *testing.T) {
resourcesYAML string
wantErr bool
}{
"GetObjects works on flannel deployment": {
wantResources: resources.NewDefaultFlannelDeployment(),
resourcesYAML: string(nginxDeplYAML),
wantErr: false,
},
"GetObjects works on cluster-autoscaler deployment": {
wantResources: resources.NewDefaultFlannelDeployment(),
wantResources: resources.NewDefaultAutoscalerDeployment(nil, nil, nil),
resourcesYAML: string(nginxDeplYAML),
wantErr: false,
},
"GetObjects works on cloud-controller-manager deployment": {
wantResources: resources.NewDefaultCloudControllerManagerDeployment("someProvider", "someImage", "somePath", nil, nil, nil, nil),
wantResources: resources.NewDefaultCloudControllerManagerDeployment("someProvider", "someImage", "somePath", "someCIDR", nil, nil, nil, nil),
resourcesYAML: string(nginxDeplYAML),
wantErr: false,
},

@@ -9,8 +9,6 @@ import (
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const defaultCIDR = "10.244.0.0/16"

type cloudControllerManagerDeployment struct {
ServiceAccount k8s.ServiceAccount
ClusterRoleBinding rbac.ClusterRoleBinding

@@ -22,14 +20,12 @@ type cloudControllerManagerDeployment struct {
// https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager

// NewDefaultCloudControllerManagerDeployment creates a new *cloudControllerManagerDeployment, customized for the CSP.
func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path string, extraArgs []string, extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar) *cloudControllerManagerDeployment {
func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path, podCIDR string, extraArgs []string, extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar) *cloudControllerManagerDeployment {
command := []string{
path,
fmt.Sprintf("--cloud-provider=%s", cloudProvider),
"--leader-elect=true",
"--allocate-node-cidrs=false",
"--configure-cloud-routes=false",
fmt.Sprintf("--cluster-cidr=%s", defaultCIDR),
fmt.Sprintf("--cluster-cidr=%s", podCIDR),
"-v=2",
}
command = append(command, extraArgs...)

@@ -151,6 +147,10 @@ func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path strin
Key: "node-role.kubernetes.io/master",
Effect: k8s.TaintEffectNoSchedule,
},
{
Key: "node.kubernetes.io/not-ready",
Effect: k8s.TaintEffectNoSchedule,
},
},
NodeSelector: map[string]string{
"node-role.kubernetes.io/master": "",

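As a quick illustration of the new signature, a hedged sketch of a call site with the added podCIDR argument; the values mirror the test below, while the function name and the chosen CIDR are assumptions.

// Sketch only: podCIDR now replaces the removed defaultCIDR constant in the --cluster-cidr flag.
func exampleCCMDeployment() ([]byte, error) {
	ccm := NewDefaultCloudControllerManagerDeployment(
		"gcp",                       // cloudProvider
		"some-image:latest",         // image
		"/cloud-controller-manager", // path (assumed)
		"10.244.0.0/16",             // podCIDR, previously hard-coded via defaultCIDR
		nil, nil, nil, nil,          // extraArgs, extraVolumes, extraVolumeMounts, env
	)
	return ccm.Marshal()
}
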
@@ -12,7 +12,7 @@ func TestCloudControllerMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)

cloudControllerManagerDepl := NewDefaultCloudControllerManagerDeployment("dummy-cloudprovider", "some-image:latest", "/dummy_path", []string{}, []k8s.Volume{}, []k8s.VolumeMount{}, nil)
cloudControllerManagerDepl := NewDefaultCloudControllerManagerDeployment("dummy-cloudprovider", "some-image:latest", "/dummy_path", "192.0.2.0/24", []string{}, []k8s.Volume{}, []k8s.VolumeMount{}, nil)
data, err := cloudControllerManagerDepl.Marshal()
require.NoError(err)

@@ -1,339 +0,0 @@
package resources

import (
"google.golang.org/protobuf/proto"
apps "k8s.io/api/apps/v1"
k8s "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
rbac "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
cniConfJSON = `{"name":"cbr0","cniVersion":"0.3.1","plugins":[{"type":"flannel","delegate":{"hairpinMode":true,"isDefaultGateway":true}},{"type":"portmap","capabilities":{"portMappings":true}}]}`
netConfJSON = `{"Network":"10.244.0.0/16","Backend":{"Type":"vxlan"}}`
)

// Reference: https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
// Changes compared to the reference: added the wireguard interface "wg0" to the args of the "kube-flannel" container of the DaemonSet.

type FlannelDeployment struct {
PodSecurityPolicy policy.PodSecurityPolicy
ClusterRole rbac.ClusterRole
ClusterRoleBinding rbac.ClusterRoleBinding
ServiceAccount k8s.ServiceAccount
ConfigMap k8s.ConfigMap
DaemonSet apps.DaemonSet
}

func NewDefaultFlannelDeployment() *FlannelDeployment {
return &FlannelDeployment{
PodSecurityPolicy: policy.PodSecurityPolicy{
TypeMeta: v1.TypeMeta{
APIVersion: "policy/v1beta1",
Kind: "PodSecurityPolicy",
},
ObjectMeta: v1.ObjectMeta{
Name: "psp.flannel.unprivileged",
Annotations: map[string]string{
"seccomp.security.alpha.kubernetes.io/allowedProfileNames": "docker/default",
"seccomp.security.alpha.kubernetes.io/defaultProfileName": "docker/default",
"apparmor.security.beta.kubernetes.io/allowedProfileNames": "runtime/default",
"apparmor.security.beta.kubernetes.io/defaultProfileName": "runtime/default",
},
},
Spec: policy.PodSecurityPolicySpec{
Privileged: false,
Volumes: []policy.FSType{
policy.FSType("configMap"),
policy.FSType("secret"),
policy.FSType("emptyDir"),
policy.FSType("hostPath"),
},
AllowedHostPaths: []policy.AllowedHostPath{
{PathPrefix: "/etc/cni/net.d"},
{PathPrefix: "/etc/kube-flannel"},
{PathPrefix: "/run/flannel"},
},
ReadOnlyRootFilesystem: false,
RunAsUser: policy.RunAsUserStrategyOptions{
Rule: policy.RunAsUserStrategyRunAsAny,
},
SupplementalGroups: policy.SupplementalGroupsStrategyOptions{
Rule: policy.SupplementalGroupsStrategyRunAsAny,
},
FSGroup: policy.FSGroupStrategyOptions{
Rule: policy.FSGroupStrategyRunAsAny,
},
AllowPrivilegeEscalation: proto.Bool(false),
DefaultAllowPrivilegeEscalation: proto.Bool(false),
AllowedCapabilities: []k8s.Capability{
k8s.Capability("NET_ADMIN"),
k8s.Capability("NET_RAW"),
},
HostPID: false,
HostIPC: false,
HostNetwork: true,
HostPorts: []policy.HostPortRange{
{Min: 0, Max: 65535},
},
SELinux: policy.SELinuxStrategyOptions{
Rule: policy.SELinuxStrategyRunAsAny,
},
},
},
ClusterRole: rbac.ClusterRole{
TypeMeta: v1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: v1.ObjectMeta{
Name: "flannel",
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{"extensions"},
Resources: []string{"podsecuritypolicies"},
Verbs: []string{"use"},
ResourceNames: []string{"psp.flannel.unprivileged"},
},
{
APIGroups: []string{""},
Resources: []string{"pods"},
Verbs: []string{"get"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"list", "watch"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes/status"},
Verbs: []string{"patch"},
},
},
},
ClusterRoleBinding: rbac.ClusterRoleBinding{
TypeMeta: v1.TypeMeta{
Kind: "ClusterRoleBinding",
APIVersion: "rbac.authorization.k8s.io/v1",
},
ObjectMeta: v1.ObjectMeta{
Name: "flannel",
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "flannel",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "flannel",
Namespace: "kube-system",
},
},
},
ServiceAccount: k8s.ServiceAccount{
TypeMeta: v1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: v1.ObjectMeta{
Name: "flannel",
Namespace: "kube-system",
},
},
ConfigMap: k8s.ConfigMap{
TypeMeta: v1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: "kube-flannel-cfg",
Namespace: "kube-system",
Labels: map[string]string{
"tier": "node",
"app": "flannel",
},
},
Data: map[string]string{
"cni-conf.json": cniConfJSON,
"net-conf.json": netConfJSON,
},
},
DaemonSet: apps.DaemonSet{
TypeMeta: v1.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: v1.ObjectMeta{
Name: "kube-flannel-ds",
Namespace: "kube-system",
Labels: map[string]string{
"tier": "node",
"app": "flannel",
},
},
Spec: apps.DaemonSetSpec{
Selector: &v1.LabelSelector{
MatchLabels: map[string]string{"app": "flannel"},
},
Template: k8s.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"tier": "node",
"app": "flannel",
},
},
Spec: k8s.PodSpec{
Affinity: &k8s.Affinity{
NodeAffinity: &k8s.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &k8s.NodeSelector{
NodeSelectorTerms: []k8s.NodeSelectorTerm{
{MatchExpressions: []k8s.NodeSelectorRequirement{
{
Key: "kubernetes.io/os",
Operator: k8s.NodeSelectorOpIn,
Values: []string{"linux"},
},
}},
},
},
},
},
HostNetwork: true,
PriorityClassName: "system-node-critical",
Tolerations: []k8s.Toleration{
{
Operator: k8s.TolerationOpExists,
Effect: k8s.TaintEffectNoSchedule,
},
},
ServiceAccountName: "flannel",
InitContainers: []k8s.Container{
{
Name: "install-cni-plugin",
Image: "rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.0",
Command: []string{"cp"},
Args: []string{"-f", "/flannel", "/opt/cni/bin/flannel"},
VolumeMounts: []k8s.VolumeMount{
{
Name: "cni-plugin",
MountPath: "/opt/cni/bin",
},
},
},
{
Name: "install-cni",
Image: "quay.io/coreos/flannel:v0.15.1",
Command: []string{"cp"},
Args: []string{"-f", "/etc/kube-flannel/cni-conf.json", "/etc/cni/net.d/10-flannel.conflist"},
VolumeMounts: []k8s.VolumeMount{
{
Name: "cni",
MountPath: "/etc/cni/net.d",
},
{
Name: "flannel-cfg",
MountPath: "/etc/kube-flannel/",
},
},
},
},
Containers: []k8s.Container{
{
Name: "kube-flannel",
Image: "quay.io/coreos/flannel:v0.15.1",
Command: []string{"/opt/bin/flanneld"},
Args: []string{"--ip-masq", "--kube-subnet-mgr", "--iface", "wg0"},
Resources: k8s.ResourceRequirements{
Requests: k8s.ResourceList{
"cpu": resource.MustParse("100m"),
"memory": resource.MustParse("50Mi"),
},
Limits: k8s.ResourceList{
"cpu": resource.MustParse("100m"),
"memory": resource.MustParse("50Mi"),
},
},
SecurityContext: &k8s.SecurityContext{
Privileged: proto.Bool(false),
Capabilities: &k8s.Capabilities{
Add: []k8s.Capability{k8s.Capability("NET_ADMIN"), k8s.Capability("NET_RAW")},
},
},
Env: []k8s.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &k8s.EnvVarSource{
FieldRef: &k8s.ObjectFieldSelector{FieldPath: "metadata.name"},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &k8s.EnvVarSource{
FieldRef: &k8s.ObjectFieldSelector{FieldPath: "metadata.namespace"},
},
},
},
VolumeMounts: []k8s.VolumeMount{
{
Name: "run",
MountPath: "/run/flannel",
},
{
Name: "flannel-cfg",
MountPath: "/etc/kube-flannel/",
},
},
},
},
Volumes: []k8s.Volume{
{
Name: "run",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{
Path: "/run/flannel",
},
},
},
{
Name: "cni-plugin",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{
Path: "/opt/cni/bin",
},
},
},
{
Name: "cni",
VolumeSource: k8s.VolumeSource{
HostPath: &k8s.HostPathVolumeSource{
Path: "/etc/cni/net.d",
},
},
},
{
Name: "flannel-cfg",
VolumeSource: k8s.VolumeSource{
ConfigMap: &k8s.ConfigMapVolumeSource{
LocalObjectReference: k8s.LocalObjectReference{
Name: "kube-flannel-cfg",
},
},
},
},
},
},
},
},
},
}
}

func (f *FlannelDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(f)
}

@@ -1,21 +0,0 @@
package resources

import (
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestFlannelDeployment(t *testing.T) {
require := require.New(t)
assert := assert.New(t)

flannelDeployment := NewDefaultFlannelDeployment()
data, err := flannelDeployment.Marshal()
require.NoError(err)

var recreated FlannelDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(flannelDeployment, &recreated)
}

@@ -20,6 +20,11 @@ const (
kubeletStartTimeout = 10 * time.Minute
)

var (
kubernetesKeyRegexp = regexp.MustCompile("[a-f0-9]{64}")
providerIDRegex = regexp.MustCompile(`^azure:///subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/Microsoft.Compute/virtualMachineScaleSets/([^/]+)/virtualMachines/([^/]+)$`)
)

// Client provides the functionality of `kubectl apply`.
type Client interface {
Apply(resources resources.Marshaler, forceConflicts bool) error

@@ -27,21 +32,6 @@ type Client interface {
// TODO: add tolerations
}

type ClusterUtil interface {
InstallComponents(ctx context.Context, version string) error
InitCluster(initConfig []byte) error
JoinCluster(joinConfig []byte) error
SetupPodNetwork(kubectl Client, podNetworkConfiguration resources.Marshaler) error
SetupAutoscaling(kubectl Client, clusterAutoscalerConfiguration resources.Marshaler, secrets resources.Marshaler) error
SetupCloudControllerManager(kubectl Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error
SetupCloudNodeManager(kubectl Client, cloudNodeManagerConfiguration resources.Marshaler) error
SetupKMS(kubectl Client, kmsConfiguration resources.Marshaler) error
StartKubelet() error
RestartKubelet() error
GetControlPlaneJoinCertificateKey() (string, error)
CreateJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error)
}

// KubernetesUtil provides low level management of the kubernetes cluster.
type KubernetesUtil struct {
inst installer

@@ -68,7 +58,7 @@ func (k *KubernetesUtil) InstallComponents(ctx context.Context, version string)
return enableSystemdUnit(ctx, kubeletServiceEtcPath)
}

func (k *KubernetesUtil) InitCluster(initConfig []byte) error {
func (k *KubernetesUtil) InitCluster(ctx context.Context, initConfig []byte) error {
// TODO: audit policy should be user input
auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()
if err != nil {

@@ -88,7 +78,7 @@ func (k *KubernetesUtil) InitCluster(initConfig []byte) error {
return fmt.Errorf("writing kubeadm init yaml config %v failed: %w", initConfigFile.Name(), err)
}

cmd := exec.Command(kubeadmPath, "init", "--config", initConfigFile.Name())
cmd := exec.CommandContext(ctx, kubeadmPath, "init", "--config", initConfigFile.Name())
_, err = cmd.Output()
if err != nil {
var exitErr *exec.ExitError

@@ -100,18 +90,84 @@ func (k *KubernetesUtil) InitCluster(initConfig []byte) error {
return nil
}

// SetupPodNetwork sets up the flannel pod network.
func (k *KubernetesUtil) SetupPodNetwork(kubectl Client, podNetworkConfiguration resources.Marshaler) error {
if err := kubectl.Apply(podNetworkConfiguration, true); err != nil {
type SetupPodNetworkInput struct {
CloudProvider string
NodeName string
FirstNodePodCIDR string
SubnetworkPodCIDR string
ProviderID string
}

// SetupPodNetwork sets up the cilium pod network.
func (k *KubernetesUtil) SetupPodNetwork(ctx context.Context, in SetupPodNetworkInput) error {
switch in.CloudProvider {
case "gcp":
return k.setupGCPPodNetwork(ctx, in.NodeName, in.FirstNodePodCIDR, in.SubnetworkPodCIDR)
case "azure":
return k.setupAzurePodNetwork(ctx, in.ProviderID, in.SubnetworkPodCIDR)
case "qemu":
return k.setupQemuPodNetwork(ctx)
default:
return fmt.Errorf("unsupported cloud provider %q", in.CloudProvider)
}
}

func (k *KubernetesUtil) setupAzurePodNetwork(ctx context.Context, providerID, subnetworkPodCIDR string) error {
matches := providerIDRegex.FindStringSubmatch(providerID)
if len(matches) != 5 {
return fmt.Errorf("error splitting providerID %q", providerID)
}

ciliumInstall := exec.CommandContext(ctx, "cilium", "install", "--azure-resource-group", matches[2], "--encryption", "wireguard", "--ipam", "azure",
"--helm-set",
"tunnel=disabled,enableIPv4Masquerade=true,azure.enabled=true,debug.enabled=true,ipv4NativeRoutingCIDR="+subnetworkPodCIDR+
",endpointRoutes.enabled=true,encryption.enabled=true,encryption.type=wireguard,l7Proxy=false,egressMasqueradeInterfaces=eth0")
ciliumInstall.Env = append(os.Environ(), "KUBECONFIG="+kubeConfig)
out, err := ciliumInstall.CombinedOutput()
if err != nil {
err = errors.New(string(out))
return err
}
return nil
}

func (k *KubernetesUtil) setupGCPPodNetwork(ctx context.Context, nodeName, nodePodCIDR, subnetworkPodCIDR string) error {
out, err := exec.CommandContext(ctx, kubectlPath, "--kubeconfig", kubeConfig, "patch", "node", nodeName, "-p", "{\"spec\":{\"podCIDR\": \""+nodePodCIDR+"\"}}").CombinedOutput()
if err != nil {
err = errors.New(string(out))
return err
}

// allow coredns to run on uninitialized nodes (required by cloud-controller-manager)
err := exec.Command(kubectlPath, "--kubeconfig", kubeConfig, "-n", "kube-system", "patch", "deployment", "coredns", "--type", "json", "-p", "[{\"op\":\"add\",\"path\":\"/spec/template/spec/tolerations/-\",\"value\":{\"key\":\"node.cloudprovider.kubernetes.io/uninitialized\",\"value\":\"true\",\"effect\":\"NoSchedule\"}}]").Run()
err = exec.CommandContext(ctx, kubectlPath, "--kubeconfig", kubeConfig, "-n", "kube-system", "patch", "deployment", "coredns", "--type", "json", "-p", "[{\"op\":\"add\",\"path\":\"/spec/template/spec/tolerations/-\",\"value\":{\"key\":\"node.cloudprovider.kubernetes.io/uninitialized\",\"value\":\"true\",\"effect\":\"NoSchedule\"}},{\"op\":\"add\",\"path\":\"/spec/template/spec/nodeSelector\",\"value\":{\"node-role.kubernetes.io/control-plane\":\"\"}}]").Run()
if err != nil {
return err
}
return exec.Command(kubectlPath, "--kubeconfig", kubeConfig, "-n", "kube-system", "patch", "deployment", "coredns", "--type", "json", "-p", "[{\"op\":\"add\",\"path\":\"/spec/template/spec/tolerations/-\",\"value\":{\"key\":\"node.kubernetes.io/network-unavailable\",\"value\":\"\",\"effect\":\"NoSchedule\"}}]").Run()

err = exec.CommandContext(ctx, kubectlPath, "--kubeconfig", kubeConfig, "-n", "kube-system", "patch", "deployment", "coredns", "--type", "json", "-p", "[{\"op\":\"add\",\"path\":\"/spec/template/spec/tolerations/-\",\"value\":{\"key\":\"node.kubernetes.io/network-unavailable\",\"value\":\"\",\"effect\":\"NoSchedule\"}}]").Run()
if err != nil {
return err
}

ciliumInstall := exec.CommandContext(ctx, "cilium", "install", "--encryption", "wireguard", "--ipam", "kubernetes", "--ipv4-native-routing-cidr", subnetworkPodCIDR, "--helm-set", "endpointRoutes.enabled=true,tunnel=disabled")
ciliumInstall.Env = append(os.Environ(), "KUBECONFIG="+kubeConfig)
out, err = ciliumInstall.CombinedOutput()
if err != nil {
err = errors.New(string(out))
return err
}
return nil
}

func (k *KubernetesUtil) setupQemuPodNetwork(ctx context.Context) error {
ciliumInstall := exec.CommandContext(ctx, "cilium", "install", "--encryption", "wireguard", "--helm-set", "ipam.operator.clusterPoolIPv4PodCIDRList=10.244.0.0/16,endpointRoutes.enabled=true")
ciliumInstall.Env = append(os.Environ(), "KUBECONFIG="+kubeConfig)
out, err := ciliumInstall.CombinedOutput()
if err != nil {
err = errors.New(string(out))
return err
}
return nil
}

// SetupAutoscaling deploys the k8s cluster autoscaler.

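For context, a hedged sketch of how a caller might invoke the reworked SetupPodNetwork. The struct, switch dispatch, and method signature are as shown above; the wrapping function and the field values are assumptions.

// Sketch only: the GCP path patches the node's podCIDR and then runs "cilium install".
func exampleSetupPodNetwork(ctx context.Context, k *KubernetesUtil) error {
	in := SetupPodNetworkInput{
		CloudProvider:     "gcp",
		NodeName:          "control-plane-0", // assumed
		FirstNodePodCIDR:  "10.244.0.0/24",   // assumed CIDR of the first node
		SubnetworkPodCIDR: "10.244.0.0/16",   // assumed subnetwork pod CIDR
		ProviderID:        "",                // only needed for the "azure" case
	}
	return k.SetupPodNetwork(ctx, in)
}
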
@@ -142,7 +198,7 @@ func (k *KubernetesUtil) SetupCloudNodeManager(kubectl Client, cloudNodeManagerC
}

// JoinCluster joins existing Kubernetes cluster using kubeadm join.
func (k *KubernetesUtil) JoinCluster(joinConfig []byte) error {
func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte) error {
// TODO: audit policy should be user input
auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()
if err != nil {

@@ -163,7 +219,7 @@ func (k *KubernetesUtil) JoinCluster(joinConfig []byte) error {
}

// run `kubeadm join` to join a worker node to an existing Kubernetes cluster
cmd := exec.Command(kubeadmPath, "join", "--config", joinConfigFile.Name())
cmd := exec.CommandContext(ctx, kubeadmPath, "join", "--config", joinConfigFile.Name())
if _, err := cmd.Output(); err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {

@@ -201,10 +257,10 @@ func (k *KubernetesUtil) RestartKubelet() error {

// GetControlPlaneJoinCertificateKey return the key which can be used in combination with the joinArgs
// to join the Cluster as control-plane.
func (k *KubernetesUtil) GetControlPlaneJoinCertificateKey() (string, error) {
func (k *KubernetesUtil) GetControlPlaneJoinCertificateKey(ctx context.Context) (string, error) {
// Key will be valid for 1h (no option to reduce the duration).
// https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-upload-certs
output, err := exec.Command(kubeadmPath, "init", "phase", "upload-certs", "--upload-certs").Output()
output, err := exec.CommandContext(ctx, kubeadmPath, "init", "phase", "upload-certs", "--upload-certs").Output()
if err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {

@@ -218,7 +274,7 @@ func (k *KubernetesUtil) GetControlPlaneJoinCertificateKey() (string, error) {
[upload-certs] Using certificate key:
9555b74008f24687eb964bd90a164ecb5760a89481d9c55a77c129b7db438168
*/
key := regexp.MustCompile("[a-f0-9]{64}").FindString(string(output))
key := kubernetesKeyRegexp.FindString(string(output))
if key == "" {
return "", fmt.Errorf("failed to parse kubeadm output: %s", string(output))
}

@@ -226,8 +282,8 @@ func (k *KubernetesUtil) GetControlPlaneJoinCertificateKey() (string, error) {
}

// CreateJoinToken creates a new bootstrap (join) token.
func (k *KubernetesUtil) CreateJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
output, err := exec.Command(kubeadmPath, "token", "create", "--ttl", ttl.String(), "--print-join-command").Output()
func (k *KubernetesUtil) CreateJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
output, err := exec.CommandContext(ctx, kubeadmPath, "token", "create", "--ttl", ttl.String(), "--print-join-command").Output()
if err != nil {
return nil, fmt.Errorf("kubeadm token create failed: %w", err)
}

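Finally, a short sketch of the context threading these signature changes enable, for example bounding kubeadm token creation with a timeout. The method signature matches the diff above; the wrapping function, the timeout, and the TTL value are assumptions.

// Sketch only: callers can now cancel or time-bound the underlying kubeadm invocations.
func exampleCreateJoinToken(k *KubernetesUtil) (*kubeadm.BootstrapTokenDiscovery, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) // assumed timeout
	defer cancel()
	return k.CreateJoinToken(ctx, 15*time.Minute) // assumed token TTL
}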