From 31276e667db843342d86e14e43c2c61ec356417f Mon Sep 17 00:00:00 2001
From: Leonard Cohnen
Date: Mon, 19 Sep 2022 11:07:46 +0200
Subject: [PATCH] [no ci] don't enable strict mode on azure

---
 .../kubernetes/k8sapi/resources/konnectivity.go     | 11 ++++++++---
 bootstrapper/internal/kubernetes/k8sapi/util.go     | 12 ++++++------
 bootstrapper/internal/kubernetes/k8sutil.go         |  4 ++--
 bootstrapper/internal/kubernetes/kubernetes.go      |  4 ++--
 bootstrapper/internal/kubernetes/kubernetes_test.go |  4 ++--
 cli/internal/gcp/client/loadbalancer.go             |  6 ++++--
 internal/versions/versions.go                       |  2 +-
 7 files changed, 25 insertions(+), 18 deletions(-)

diff --git a/bootstrapper/internal/kubernetes/k8sapi/resources/konnectivity.go b/bootstrapper/internal/kubernetes/k8sapi/resources/konnectivity.go
index 63e8ae41c..d617b1d50 100644
--- a/bootstrapper/internal/kubernetes/k8sapi/resources/konnectivity.go
+++ b/bootstrapper/internal/kubernetes/k8sapi/resources/konnectivity.go
@@ -113,6 +113,7 @@ func NewKonnectivityAgents(konnectivityServerAddress string) *konnectivityAgents
 			// https://github.com/kubernetes-sigs/apiserver-network-proxy/issues/273
 			"--sync-forever=true",
 			// Ensure stable connection to the konnectivity server.
+			"--keepalive-time=20s",
 			"--sync-interval=1s",     // GKE: 5s
 			"--sync-interval-cap=3s", // GKE: 30s
 			"--probe-interval=1s",    // GKE: 5s
@@ -212,9 +213,9 @@ func NewKonnectivityAgents(konnectivityServerAddress string) *konnectivityAgents
 	}
 }
 
-func NewKonnectivityServerStaticPod(nodeCIDR string) *konnectivityServerStaticPod {
+func NewKonnectivityServerStaticPod(nodeCIDR, csp string) *konnectivityServerStaticPod {
 	udsHostPathType := corev1.HostPathDirectoryOrCreate
-	return &konnectivityServerStaticPod{
+	yaml := &konnectivityServerStaticPod{
 		StaticPod: corev1.Pod{
 			TypeMeta: metav1.TypeMeta{
 				APIVersion: "v1",
@@ -253,7 +254,6 @@ func NewKonnectivityServerStaticPo
 					"--kubeconfig=/etc/kubernetes/konnectivity-server.conf",
 					"--authentication-audience=system:konnectivity-server",
 					"--proxy-strategies=destHost,default",
-					"--node-cidr=" + nodeCIDR, //--node-cidr=10.9.0.0/16,
 				},
 				LivenessProbe: &corev1.Probe{
 					ProbeHandler: corev1.ProbeHandler{
@@ -331,6 +331,11 @@ func NewKonnectivityServerStaticPo
 			},
 		},
 	}
+	// Enable strict routing by adding the node CIDR as an argument, e.g. "--node-cidr=10.9.0.0/16". Strict mode is not enabled on Azure.
+	if csp != "azure" {
+		yaml.StaticPod.Spec.Containers[0].Args = append(yaml.StaticPod.Spec.Containers[0].Args, "--node-cidr="+nodeCIDR)
+	}
+	return yaml
 }
 
 func NewEgressSelectorConfiguration() *egressSelectorConfiguration {
diff --git a/bootstrapper/internal/kubernetes/k8sapi/util.go b/bootstrapper/internal/kubernetes/k8sapi/util.go
index d3ddc6ec1..2c0a7a633 100644
--- a/bootstrapper/internal/kubernetes/k8sapi/util.go
+++ b/bootstrapper/internal/kubernetes/k8sapi/util.go
@@ -131,7 +131,7 @@ func (k *KubernetesUtil) InstallComponents(ctx context.Context, version versions
 }
 
 func (k *KubernetesUtil) InitCluster(
-	ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, nodeCIDR string, log *logger.Logger,
+	ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, nodeCIDR string, csp string, log *logger.Logger,
 ) error {
 	// TODO: audit policy should be user input
 	auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()
@@ -182,7 +182,7 @@ func (k *KubernetesUtil) InitCluster(
 	}
 
 	log.Infof("Preparing node for Konnectivity")
-	if err := k.prepareControlPlaneForKonnectivity(ctx, controlPlaneEndpoint, nodeCIDR); err != nil {
+	if err := k.prepareControlPlaneForKonnectivity(ctx, controlPlaneEndpoint, nodeCIDR, csp); err != nil {
 		return fmt.Errorf("setup konnectivity: %w", err)
 	}
 
@@ -201,7 +201,7 @@ func (k *KubernetesUtil) InitCluster(
 	return nil
 }
 
-func (k *KubernetesUtil) prepareControlPlaneForKonnectivity(ctx context.Context, loadBalancerEndpoint, nodeCIDR string) error {
+func (k *KubernetesUtil) prepareControlPlaneForKonnectivity(ctx context.Context, loadBalancerEndpoint, nodeCIDR, csp string) error {
 	if !strings.Contains(loadBalancerEndpoint, ":") {
 		loadBalancerEndpoint = net.JoinHostPort(loadBalancerEndpoint, strconv.Itoa(constants.KubernetesPort))
 	}
@@ -210,7 +210,7 @@ func (k *KubernetesUtil) prepareControlPlaneForKonnectivity(ctx context.Context,
 		return fmt.Errorf("creating static pods directory: %w", err)
 	}
 
-	konnectivityServerYaml, err := resources.NewKonnectivityServerStaticPod(nodeCIDR).Marshal()
+	konnectivityServerYaml, err := resources.NewKonnectivityServerStaticPod(nodeCIDR, csp).Marshal()
 	if err != nil {
 		return fmt.Errorf("generating konnectivity server static pod: %w", err)
 	}
@@ -514,7 +514,7 @@ func (k *KubernetesUtil) SetupNodeOperator(ctx context.Context, kubectl Client,
 }
 
 // JoinCluster joins existing Kubernetes cluster using kubeadm join.
-func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, nodeCIDR string, log *logger.Logger) error {
+func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, nodeCIDR string, csp string, log *logger.Logger) error {
 	// TODO: audit policy should be user input
 	auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()
 	if err != nil {
@@ -535,7 +535,7 @@ func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, pee
 
 	if peerRole == role.ControlPlane {
 		log.Infof("Prep Init Kubernetes cluster")
-		if err := k.prepareControlPlaneForKonnectivity(ctx, controlPlaneEndpoint, nodeCIDR); err != nil {
+		if err := k.prepareControlPlaneForKonnectivity(ctx, controlPlaneEndpoint, nodeCIDR, csp); err != nil {
 			return fmt.Errorf("setup konnectivity: %w", err)
 		}
 	}
diff --git a/bootstrapper/internal/kubernetes/k8sutil.go b/bootstrapper/internal/kubernetes/k8sutil.go
index f62ea483b..9e80db8f1 100644
--- a/bootstrapper/internal/kubernetes/k8sutil.go
+++ b/bootstrapper/internal/kubernetes/k8sutil.go
@@ -19,8 +19,8 @@ import (
 
 type clusterUtil interface {
 	InstallComponents(ctx context.Context, version versions.ValidK8sVersion) error
-	InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, nodeCIDR string, log *logger.Logger) error
-	JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, nodeCIDR string, log *logger.Logger) error
+	InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, nodeCIDR string, csp string, log *logger.Logger) error
+	JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, nodeCIDR string, csp string, log *logger.Logger) error
 	SetupHelmDeployments(ctx context.Context, client k8sapi.Client, helmDeployments []byte, in k8sapi.SetupPodNetworkInput, log *logger.Logger) error
 	SetupAccessManager(kubectl k8sapi.Client, sshUsers kubernetes.Marshaler) error
 	SetupAutoscaling(kubectl k8sapi.Client, clusterAutoscalerConfiguration kubernetes.Marshaler, secrets kubernetes.Marshaler) error
diff --git a/bootstrapper/internal/kubernetes/kubernetes.go b/bootstrapper/internal/kubernetes/kubernetes.go
index dbc2d2215..f88ac7e80 100644
--- a/bootstrapper/internal/kubernetes/kubernetes.go
+++ b/bootstrapper/internal/kubernetes/kubernetes.go
@@ -160,7 +160,7 @@ func (k *KubeWrapper) InitCluster(
 		return nil, fmt.Errorf("encoding kubeadm init configuration as YAML: %w", err)
 	}
 	log.Infof("Initializing Kubernetes cluster")
-	if err := k.clusterUtil.InitCluster(ctx, initConfigYAML, nodeName, validIPs, controlPlaneEndpoint, nodeCIDR, log); err != nil {
+	if err := k.clusterUtil.InitCluster(ctx, initConfigYAML, nodeName, validIPs, controlPlaneEndpoint, nodeCIDR, k.cloudProvider, log); err != nil {
 		return nil, fmt.Errorf("kubeadm init: %w", err)
 	}
 	kubeConfig, err := k.GetKubeconfig()
@@ -315,7 +315,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo
 		return fmt.Errorf("encoding kubeadm join configuration as YAML: %w", err)
 	}
 	log.With(zap.String("apiServerEndpoint", args.APIServerEndpoint)).Infof("Joining Kubernetes cluster")
-	if err := k.clusterUtil.JoinCluster(ctx, joinConfigYAML, peerRole, loadbalancerEndpoint, nodeCIDR, log); err != nil {
+	if err := k.clusterUtil.JoinCluster(ctx, joinConfigYAML, peerRole, loadbalancerEndpoint, nodeCIDR, k.cloudProvider, log); err != nil {
 		return fmt.Errorf("joining cluster: %v; %w ", string(joinConfigYAML), err)
 	}
 
diff --git a/bootstrapper/internal/kubernetes/kubernetes_test.go b/bootstrapper/internal/kubernetes/kubernetes_test.go
index 2da0dd0e3..a06239390 100644
--- a/bootstrapper/internal/kubernetes/kubernetes_test.go
+++ b/bootstrapper/internal/kubernetes/kubernetes_test.go
@@ -557,7 +557,7 @@ func (s *stubClusterUtil) InstallComponents(ctx context.Context, version version
 	return s.installComponentsErr
 }
 
-func (s *stubClusterUtil) InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, nodeCIDR string, log *logger.Logger) error {
+func (s *stubClusterUtil) InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, nodeCIDR string, csp string, log *logger.Logger) error {
 	s.initConfigs = append(s.initConfigs, initConfig)
 	return s.initClusterErr
 }
@@ -610,7 +610,7 @@ func (s *stubClusterUtil) SetupNodeOperator(ctx context.Context, kubectl k8sapi.
 	return s.setupNodeOperatorErr
 }
 
-func (s *stubClusterUtil) JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, nodeCIDR string, log *logger.Logger) error {
+func (s *stubClusterUtil) JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, nodeCIDR string, csp string, log *logger.Logger) error {
 	s.joinConfigs = append(s.joinConfigs, joinConfig)
 	return s.joinClusterErr
 }
diff --git a/cli/internal/gcp/client/loadbalancer.go b/cli/internal/gcp/client/loadbalancer.go
index 16b91e229..63b664823 100644
--- a/cli/internal/gcp/client/loadbalancer.go
+++ b/cli/internal/gcp/client/loadbalancer.go
@@ -172,10 +172,12 @@ func (c *Client) createBackendService(ctx context.Context, lb *loadBalancer) err
 			LoadBalancingScheme: proto.String(computepb.BackendService_LoadBalancingScheme_name[int32(computepb.BackendService_EXTERNAL)]),
 			HealthChecks:        []string{c.resourceURI(scopeGlobal, "healthChecks", lb.name)},
 			PortName:            proto.String(lb.backendPortName),
+			TimeoutSec:          proto.Int32(240),
 			Backends: []*computepb.Backend{
 				{
-					BalancingMode: proto.String(computepb.Backend_BalancingMode_name[int32(computepb.Backend_UTILIZATION)]),
-					Group:         proto.String(c.resourceURI(scopeZone, "instanceGroups", c.controlPlaneInstanceGroup)),
+					BalancingMode:  proto.String(computepb.Backend_BalancingMode_name[int32(computepb.Backend_UTILIZATION)]),
+					MaxUtilization: proto.Float32(0.8),
+					Group:          proto.String(c.resourceURI(scopeZone, "instanceGroups", c.controlPlaneInstanceGroup)),
 				},
 			},
 		},
diff --git a/internal/versions/versions.go b/internal/versions/versions.go
index d7b4c54d1..3f09238cd 100644
--- a/internal/versions/versions.go
+++ b/internal/versions/versions.go
@@ -46,7 +46,7 @@ const (
 	KonnectivityAgentImage = "us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent:v0.0.32"
 	// TODO: switch back to official image once cilium node2node encryption is enabled.
 	// KonnectivityServerImage = "registry.k8s.io/kas-network-proxy/proxy-server:v0.0.32".
- KonnectivityServerImage = "ghcr.io/3u13r/constellation-konnectivity-server:v0.0.33-edgeless" + KonnectivityServerImage = "ghcr.io/3u13r/constellation-konnectivity-server:v0.0.33-edgeless@sha256:75a46a3d6cca859e301059ba62324cf986826122ec315a753dd7389d3fe09473" JoinImage = "ghcr.io/edgelesssys/constellation/join-service:v2.0.0" AccessManagerImage = "ghcr.io/edgelesssys/constellation/access-manager:v2.0.0" KmsImage = "ghcr.io/edgelesssys/constellation/kmsserver:v2.0.0"
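
Notes:

The new csp parameter threaded through InitCluster and JoinCluster is the
lower-case provider string taken from KubeWrapper's k.cloudProvider, so the
konnectivity server static pod only gains the --node-cidr argument when the
cluster does not run on Azure. A minimal test sketch of that behavior
(hypothetical; it would sit next to konnectivity.go in the resources package
to reach the generated pod spec, and it assumes the provider strings "azure"
and "gcp" match what the bootstrapper passes down):

	package resources

	import "testing"

	// hasNodeCIDRArg reports whether the generated konnectivity server
	// static pod passes --node-cidr for the given provider.
	func hasNodeCIDRArg(csp string) bool {
		pod := NewKonnectivityServerStaticPod("10.9.0.0/16", csp)
		for _, arg := range pod.StaticPod.Spec.Containers[0].Args {
			if arg == "--node-cidr=10.9.0.0/16" {
				return true
			}
		}
		return false
	}

	func TestStrictModeDisabledOnAzure(t *testing.T) {
		if hasNodeCIDRArg("azure") {
			t.Error("--node-cidr (strict mode) must not be set on azure")
		}
		if !hasNodeCIDRArg("gcp") {
			t.Error("--node-cidr (strict mode) expected off azure, e.g. on gcp")
		}
	}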
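On the GCP side, TimeoutSec raises the backend service timeout from GCP's
30 second default to 240 seconds and MaxUtilization makes the documented 0.8
utilization target explicit; presumably this keeps long-lived konnectivity
tunnels through the load balancer from being dropped early, though the patch
itself states no rationale. The forked konnectivity-server image is now also
pinned by sha256 digest, so the pull no longer depends on the mutable
v0.0.33-edgeless tag; this can be checked locally with

	docker pull ghcr.io/3u13r/constellation-konnectivity-server:v0.0.33-edgeless@sha256:75a46a3d6cca859e301059ba62324cf986826122ec315a753dd7389d3fe09473

which fails unless the registry's content matches the digest.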