konnectivity strict node routing

This commit is contained in:
Leonard Cohnen 2022-09-11 04:39:26 +02:00
parent fcd318344a
commit 8a60db5bfa
12 changed files with 112 additions and 26 deletions

View File

@ -25,6 +25,8 @@ type ProviderMetadata interface {
Self(ctx context.Context) (metadata.InstanceMetadata, error)
// GetSubnetworkCIDR retrieves the subnetwork CIDR for the current instance.
GetSubnetworkCIDR(ctx context.Context) (string, error)
// GetNodenetworkCIDR retrieves the node network CIDR for the current instance.
GetNodenetworkCIDR(ctx context.Context) (string, error)
// SupportsLoadBalancer returns true if the cloud provider supports load balancers.
SupportsLoadBalancer() bool
// GetLoadBalancerEndpoint retrieves the load balancer endpoint.
@ -97,6 +99,9 @@ type stubProviderMetadata struct {
GetSubnetworkCIDRErr error
GetSubnetworkCIDRResp string
GetNodenetworkCIDRErr error
GetNodenetworkCIDRResp string
ListErr error
ListResp []metadata.InstanceMetadata
@ -121,6 +126,10 @@ func (m *stubProviderMetadata) GetSubnetworkCIDR(ctx context.Context) (string, e
return m.GetSubnetworkCIDRResp, m.GetSubnetworkCIDRErr
}
// GetNodenetworkCIDR implements ProviderMetadata for tests; it returns the
// canned response and error configured on the stub.
func (m *stubProviderMetadata) GetNodenetworkCIDR(ctx context.Context) (string, error) {
	return m.GetNodenetworkCIDRResp, m.GetNodenetworkCIDRErr
}
// List implements ProviderMetadata for tests; it returns the canned instance
// list and error configured on the stub.
func (m *stubProviderMetadata) List(ctx context.Context) ([]metadata.InstanceMetadata, error) {
	return m.ListResp, m.ListErr
}

View File

@ -107,7 +107,7 @@ func NewKonnectivityAgents(konnectivityServerAddress string) *konnectivityAgents
"--admin-server-port=8133",
"--health-server-port=8134",
"--service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token",
"--agent-identifiers=host=$(HOST_IP)",
"--agent-identifiers=host=$(HOST_IP)&default-route=true",
// we will be able to avoid constant polling when either one is done:
// https://github.com/kubernetes-sigs/apiserver-network-proxy/issues/358
// https://github.com/kubernetes-sigs/apiserver-network-proxy/issues/273
@ -213,7 +213,7 @@ func NewKonnectivityAgents(konnectivityServerAddress string) *konnectivityAgents
}
}
func NewKonnectivityServerStaticPod() *konnectivityServerStaticPod {
func NewKonnectivityServerStaticPod(nodeCIDR string) *konnectivityServerStaticPod {
udsHostPathType := corev1.HostPathDirectoryOrCreate
return &konnectivityServerStaticPod{
StaticPod: corev1.Pod{
@ -253,7 +253,9 @@ func NewKonnectivityServerStaticPod() *konnectivityServerStaticPod {
"--agent-service-account=konnectivity-agent",
"--kubeconfig=/etc/kubernetes/konnectivity-server.conf",
"--authentication-audience=system:konnectivity-server",
"--proxy-strategies=destHost,default",
// "--proxy-strategies=destHost,default",
"--proxy-strategies=destHost,defaultRoute",
"--node-cidr=" + nodeCIDR, //"--node-cidr=10.9.0.0/16",
},
LivenessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{

View File

@ -131,7 +131,7 @@ func (k *KubernetesUtil) InstallComponents(ctx context.Context, version versions
}
func (k *KubernetesUtil) InitCluster(
ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, log *logger.Logger,
ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, nodeCIDR string, log *logger.Logger,
) error {
// TODO: audit policy should be user input
auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()
@ -182,7 +182,7 @@ func (k *KubernetesUtil) InitCluster(
}
log.Infof("Preparing node for Konnectivity")
if err := k.prepareControlPlaneForKonnectivity(ctx, controlPlaneEndpoint); err != nil {
if err := k.prepareControlPlaneForKonnectivity(ctx, controlPlaneEndpoint, nodeCIDR); err != nil {
return fmt.Errorf("setup konnectivity: %w", err)
}
@ -201,7 +201,7 @@ func (k *KubernetesUtil) InitCluster(
return nil
}
func (k *KubernetesUtil) prepareControlPlaneForKonnectivity(ctx context.Context, loadBalancerEndpoint string) error {
func (k *KubernetesUtil) prepareControlPlaneForKonnectivity(ctx context.Context, loadBalancerEndpoint, nodeCIDR string) error {
if !strings.Contains(loadBalancerEndpoint, ":") {
loadBalancerEndpoint = net.JoinHostPort(loadBalancerEndpoint, strconv.Itoa(constants.KubernetesPort))
}
@ -210,7 +210,7 @@ func (k *KubernetesUtil) prepareControlPlaneForKonnectivity(ctx context.Context,
return fmt.Errorf("creating static pods directory: %w", err)
}
konnectivityServerYaml, err := resources.NewKonnectivityServerStaticPod().Marshal()
konnectivityServerYaml, err := resources.NewKonnectivityServerStaticPod(nodeCIDR).Marshal()
if err != nil {
return fmt.Errorf("generating konnectivity server static pod: %w", err)
}
@ -514,7 +514,7 @@ func (k *KubernetesUtil) SetupNodeOperator(ctx context.Context, kubectl Client,
}
// JoinCluster joins existing Kubernetes cluster using kubeadm join.
func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, log *logger.Logger) error {
func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, nodeCIDR string, log *logger.Logger) error {
// TODO: audit policy should be user input
auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()
if err != nil {
@ -535,7 +535,7 @@ func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, pee
if peerRole == role.ControlPlane {
log.Infof("Prep Init Kubernetes cluster")
if err := k.prepareControlPlaneForKonnectivity(ctx, controlPlaneEndpoint); err != nil {
if err := k.prepareControlPlaneForKonnectivity(ctx, controlPlaneEndpoint, nodeCIDR); err != nil {
return fmt.Errorf("setup konnectivity: %w", err)
}
}

View File

@ -19,8 +19,8 @@ import (
type clusterUtil interface {
InstallComponents(ctx context.Context, version versions.ValidK8sVersion) error
InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, log *logger.Logger) error
JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, log *logger.Logger) error
InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, nodeCIDR string, log *logger.Logger) error
JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, nodeCIDR string, log *logger.Logger) error
SetupHelmDeployments(ctx context.Context, client k8sapi.Client, helmDeployments []byte, in k8sapi.SetupPodNetworkInput, log *logger.Logger) error
SetupAccessManager(kubectl k8sapi.Client, sshUsers kubernetes.Marshaler) error
SetupAutoscaling(kubectl k8sapi.Client, clusterAutoscalerConfiguration kubernetes.Marshaler, secrets kubernetes.Marshaler) error

View File

@ -97,6 +97,7 @@ func (k *KubeWrapper) InitCluster(
var instance metadata.InstanceMetadata
var publicIP string
var nodePodCIDR string
var nodeCIDR string
var subnetworkPodCIDR string
var controlPlaneEndpoint string // this is the endpoint in "kubeadm init --control-plane-endpoint=<IP/DNS>:<port>"
var nodeIP string
@ -127,6 +128,10 @@ func (k *KubeWrapper) InitCluster(
if err != nil {
return nil, fmt.Errorf("retrieving subnetwork CIDR: %w", err)
}
nodeCIDR, err = k.providerMetadata.GetNodenetworkCIDR(ctx)
if err != nil {
return nil, fmt.Errorf("retrieving nodeCIDR CIDR: %w", err)
}
controlPlaneEndpoint = publicIP
if k.providerMetadata.SupportsLoadBalancer() {
controlPlaneEndpoint, err = k.providerMetadata.GetLoadBalancerEndpoint(ctx)
@ -155,7 +160,7 @@ func (k *KubeWrapper) InitCluster(
return nil, fmt.Errorf("encoding kubeadm init configuration as YAML: %w", err)
}
log.Infof("Initializing Kubernetes cluster")
if err := k.clusterUtil.InitCluster(ctx, initConfigYAML, nodeName, validIPs, controlPlaneEndpoint, log); err != nil {
if err := k.clusterUtil.InitCluster(ctx, initConfigYAML, nodeName, validIPs, controlPlaneEndpoint, nodeCIDR, log); err != nil {
return nil, fmt.Errorf("kubeadm init: %w", err)
}
kubeConfig, err := k.GetKubeconfig()
@ -264,6 +269,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo
}
nodeName := nodeInternalIP
var providerID string
var nodeCIDR string
var loadbalancerEndpoint string
if k.providerMetadata.Supported() {
log.Infof("Retrieving node metadata")
@ -274,6 +280,10 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo
providerID = instance.ProviderID
nodeName = instance.Name
nodeInternalIP = instance.VPCIP
nodeCIDR, err = k.providerMetadata.GetNodenetworkCIDR(ctx)
if err != nil {
return fmt.Errorf("retrieving nodeCIDR CIDR: %w", err)
}
if k.providerMetadata.SupportsLoadBalancer() {
loadbalancerEndpoint, err = k.providerMetadata.GetLoadBalancerEndpoint(ctx)
if err != nil {
@ -305,7 +315,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo
return fmt.Errorf("encoding kubeadm join configuration as YAML: %w", err)
}
log.With(zap.String("apiServerEndpoint", args.APIServerEndpoint)).Infof("Joining Kubernetes cluster")
if err := k.clusterUtil.JoinCluster(ctx, joinConfigYAML, peerRole, loadbalancerEndpoint, log); err != nil {
if err := k.clusterUtil.JoinCluster(ctx, joinConfigYAML, peerRole, loadbalancerEndpoint, nodeCIDR, log); err != nil {
return fmt.Errorf("joining cluster: %v; %w ", string(joinConfigYAML), err)
}

View File

@ -557,7 +557,7 @@ func (s *stubClusterUtil) InstallComponents(ctx context.Context, version version
return s.installComponentsErr
}
func (s *stubClusterUtil) InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, log *logger.Logger) error {
// InitCluster implements clusterUtil for tests: it records the init config for
// later inspection and returns the preconfigured error. All other arguments
// (including the new nodeCIDR) are ignored.
func (s *stubClusterUtil) InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, nodeCIDR string, log *logger.Logger) error {
	s.initConfigs = append(s.initConfigs, initConfig)
	return s.initClusterErr
}
@ -610,7 +610,7 @@ func (s *stubClusterUtil) SetupNodeOperator(ctx context.Context, kubectl k8sapi.
return s.setupNodeOperatorErr
}
func (s *stubClusterUtil) JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, log *logger.Logger) error {
// JoinCluster implements clusterUtil for tests: it records the join config for
// later inspection and returns the preconfigured error. All other arguments
// (including the new nodeCIDR) are ignored.
func (s *stubClusterUtil) JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, nodeCIDR string, log *logger.Logger) error {
	s.joinConfigs = append(s.joinConfigs, joinConfig)
	return s.joinClusterErr
}

View File

@ -154,6 +154,24 @@ func (m *Metadata) GetNetworkSecurityGroupName(ctx context.Context) (string, err
return *nsg.Name, nil
}
// GetNodenetworkCIDR retrieves the node network CIDR from cloud provider metadata.
// It reports the address prefix of the first subnet of the instance's virtual
// network. The doc comment and error text previously said "subnetwork CIDR",
// copy-pasted from GetSubnetworkCIDR; this function's contract is the node network.
func (m *Metadata) GetNodenetworkCIDR(ctx context.Context) (string, error) {
	resourceGroup, err := m.imdsAPI.ResourceGroup(ctx)
	if err != nil {
		return "", err
	}
	virtualNetwork, err := m.getVirtualNetwork(ctx, resourceGroup)
	if err != nil {
		return "", err
	}
	// Defensive nil checks: every pointer on the SDK response chain may be unset.
	if virtualNetwork == nil || virtualNetwork.Properties == nil || len(virtualNetwork.Properties.Subnets) == 0 ||
		virtualNetwork.Properties.Subnets[0].Properties == nil || virtualNetwork.Properties.Subnets[0].Properties.AddressPrefix == nil {
		return "", fmt.Errorf("could not retrieve node network CIDR from virtual network %v", virtualNetwork)
	}
	return *virtualNetwork.Properties.Subnets[0].Properties.AddressPrefix, nil
}
// GetSubnetworkCIDR retrieves the subnetwork CIDR from cloud provider metadata.
func (m *Metadata) GetSubnetworkCIDR(ctx context.Context) (string, error) {
resourceGroup, err := m.imdsAPI.ResourceGroup(ctx)

View File

@ -186,12 +186,29 @@ func (c *Client) UnsetInstanceMetadata(ctx context.Context, project, zone, insta
// RetrieveSubnetworkAliasCIDR returns the alias (secondary-range) CIDR of the
// subnetwork that the instance given by project, zone and instanceName is
// attached to.
func (c *Client) RetrieveSubnetworkAliasCIDR(ctx context.Context, project, zone, instanceName string) (string, error) {
	subnet, err := c.retrieveSubnetwork(ctx, project, zone, instanceName)
	if err != nil {
		return "", fmt.Errorf("retrieving subnetwork: %w", err)
	}
	// retrieveSubnetwork guarantees a non-empty SecondaryIpRanges slice whose
	// first entry carries a non-nil IpCidrRange.
	aliasRange := subnet.SecondaryIpRanges[0]
	return *aliasRange.IpCidrRange, nil
}
// RetrieveSubnetworkCIDR returns the primary CIDR of the subnetwork that the
// instance given by project, zone and instanceName is attached to.
// (The previous doc comment wrongly said "alias CIDR" and "subnetworkName".)
func (c *Client) RetrieveSubnetworkCIDR(ctx context.Context, project, zone, instanceName string) (string, error) {
	subnetwork, err := c.retrieveSubnetwork(ctx, project, zone, instanceName)
	if err != nil {
		return "", fmt.Errorf("retrieving subnetwork: %w", err)
	}
	// retrieveSubnetwork only validates SecondaryIpRanges; the primary
	// IpCidrRange must be checked here before dereferencing.
	if subnetwork.IpCidrRange == nil {
		return "", fmt.Errorf("retrieving subnetwork CIDR returned invalid results")
	}
	return *subnetwork.IpCidrRange, nil
}
func (c *Client) retrieveSubnetwork(ctx context.Context, project, zone, instanceName string) (*computepb.Subnetwork, error) {
instance, err := c.getComputeInstance(ctx, project, zone, instanceName)
if err != nil {
return "", err
return nil, err
}
if instance == nil || instance.NetworkInterfaces == nil || len(instance.NetworkInterfaces) == 0 || instance.NetworkInterfaces[0].Subnetwork == nil {
return "", fmt.Errorf("retrieving instance network interfaces failed")
return nil, fmt.Errorf("retrieving instance network interfaces failed")
}
subnetworkURL := *instance.NetworkInterfaces[0].Subnetwork
subnetworkURLFragments := strings.Split(subnetworkURL, "/")
@ -202,7 +219,7 @@ func (c *Client) RetrieveSubnetworkAliasCIDR(ctx context.Context, project, zone,
// europe-west3-b --> europe-west3
region := zoneFromRegionRegex.FindString(zone)
if region == "" {
return "", fmt.Errorf("invalid zone %s", zone)
return nil, fmt.Errorf("invalid zone %s", zone)
}
req := &computepb.GetSubnetworkRequest{
@ -212,13 +229,12 @@ func (c *Client) RetrieveSubnetworkAliasCIDR(ctx context.Context, project, zone,
}
subnetwork, err := c.subnetworkAPI.Get(ctx, req)
if err != nil {
return "", fmt.Errorf("retrieving subnetwork alias CIDR failed: %w", err)
return nil, fmt.Errorf("retrieving subnetwork alias CIDR failed: %w", err)
}
if subnetwork == nil || len(subnetwork.SecondaryIpRanges) == 0 || (subnetwork.SecondaryIpRanges[0]).IpCidrRange == nil {
return "", fmt.Errorf("retrieving subnetwork alias CIDR returned invalid results")
return nil, fmt.Errorf("retrieving subnetwork alias CIDR returned invalid results")
}
return *(subnetwork.SecondaryIpRanges[0]).IpCidrRange, nil
return subnetwork, nil
}
// RetrieveLoadBalancerEndpoint returns the endpoint of the load balancer with the constellation-uid tag.

View File

@ -30,8 +30,10 @@ type API interface {
RetrieveZone() (string, error)
// RetrieveInstanceName retrieves the instance name of the current instance.
RetrieveInstanceName() (string, error)
// RetrieveSubnetworkAliasCIDR retrieves the subnetwork CIDR of the current instance.
// RetrieveSubnetworkAliasCIDR retrieves the subnetwork alias CIDR of the current instance.
RetrieveSubnetworkAliasCIDR(ctx context.Context, project, zone, instanceName string) (string, error)
// RetrieveSubnetworkCIDR retrieves the subnetwork CIDR of the current instance.
RetrieveSubnetworkCIDR(ctx context.Context, project, zone, instanceName string) (string, error)
// RetrieveLoadBalancerEndpoint retrieves the load balancer endpoint of the current instance.
RetrieveLoadBalancerEndpoint(ctx context.Context, project string) (string, error)
// SetInstanceMetadata sets metadata key: value of the instance specified by project, zone and instanceName.
@ -112,6 +114,23 @@ func (m *Metadata) GetSubnetworkCIDR(ctx context.Context) (string, error) {
return m.api.RetrieveSubnetworkAliasCIDR(ctx, project, zone, instanceName)
}
// GetNodenetworkCIDR returns the node network CIDR of the current instance.
// It resolves project, zone and instance name from the metadata service, then
// queries the subnetwork's primary CIDR — in contrast to GetSubnetworkCIDR,
// which returns the alias (pod) CIDR.
func (m *Metadata) GetNodenetworkCIDR(ctx context.Context) (string, error) {
	project, err := m.api.RetrieveProjectID()
	if err != nil {
		return "", err
	}
	zone, err := m.api.RetrieveZone()
	if err != nil {
		return "", err
	}
	instanceName, err := m.api.RetrieveInstanceName()
	if err != nil {
		return "", err
	}
	return m.api.RetrieveSubnetworkCIDR(ctx, project, zone, instanceName)
}
// SupportsLoadBalancer returns true if the cloud provider supports load balancers.
func (m *Metadata) SupportsLoadBalancer() bool {
return true

View File

@ -246,6 +246,7 @@ type stubGCPClient struct {
retrieveInstanceMetadaValues map[string]string
retrieveInstanceMetadataErr error
retrieveSubnetworkAliasErr error
retrieveSubnetworkErr error
projectID string
zone string
instanceName string
@ -323,3 +324,7 @@ func (s *stubGCPClient) UnsetInstanceMetadata(ctx context.Context, project, zone
// RetrieveSubnetworkAliasCIDR implements the API interface for tests; it
// returns an empty CIDR and the preconfigured error.
func (s *stubGCPClient) RetrieveSubnetworkAliasCIDR(ctx context.Context, project, zone, instanceName string) (string, error) {
	return "", s.retrieveSubnetworkAliasErr
}
// RetrieveSubnetworkCIDR implements the API interface for tests; it returns an
// empty CIDR and the preconfigured error.
func (s *stubGCPClient) RetrieveSubnetworkCIDR(ctx context.Context, project, zone, instanceName string) (string, error) {
	return "", s.retrieveSubnetworkErr
}

View File

@ -83,11 +83,16 @@ func (m Metadata) UID(ctx context.Context) (string, error) {
return "", nil
}
// GetSubnetworkCIDR retrieves the subnetwork CIDR from cloud provider metadata.
// GetSubnetworkCIDR retrieves the pod subnetwork CIDR from cloud provider metadata.
// QEMU has no metadata source for this, so a fixed pod CIDR is returned.
func (m Metadata) GetSubnetworkCIDR(ctx context.Context) (string, error) {
	return "10.244.0.0/16", nil
}
// GetNodenetworkCIDR retrieves the node subnetwork CIDR from cloud provider metadata.
// QEMU cannot determine the actual node network, so the catch-all CIDR is
// returned, which matches every destination host.
// NOTE(review): presumably consumers (e.g. konnectivity --node-cidr) accept
// 0.0.0.0/0 as "route everything" — confirm against the server flag handling.
func (m Metadata) GetNodenetworkCIDR(ctx context.Context) (string, error) {
	return "0.0.0.0/0", nil
}
func (m Metadata) retrieveMetadata(ctx context.Context, uri string) ([]byte, error) {
url := &url.URL{
Scheme: "http",

View File

@ -43,8 +43,10 @@ func IsPreviewK8sVersion(version ValidK8sVersion) bool {
const (
// Constellation images.
// These images are built in a way that they support all versions currently listed in VersionConfigs.
KonnectivityAgentImage = "us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent:v0.0.32"
KonnectivityServerImage = "registry.k8s.io/kas-network-proxy/proxy-server:v0.0.32"
KonnectivityAgentImage = "us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent:v0.0.32"
// TODO: switch back to official image once cilium node2node encryption is enabled.
// KonnectivityServerImage = "registry.k8s.io/kas-network-proxy/proxy-server:v0.0.32".
KonnectivityServerImage = "ghcr.io/3u13r/constellation-konnectivity-server:v0.0.33-edgeless@sha256:bf5748999b20576c7c97f25d2762408d705df5ae20640494bcb4cac5d648b583"
JoinImage = "ghcr.io/edgelesssys/constellation/join-service:v2.0.0"
AccessManagerImage = "ghcr.io/edgelesssys/constellation/access-manager:v2.0.0"
KmsImage = "ghcr.io/edgelesssys/constellation/kmsserver:v2.0.0"