diff --git a/CHANGELOG.md b/CHANGELOG.md index cec0aa93c..ba57534fa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - GCP: Support for higher end N2D standard (128 & 224 vCPUs), *high-mem* and *high-cpu* VMs - Add `constellation upgrade` to update node images in Constellation. - Add cilium v1.12.1 with strict mode v2 +- Konnectivity is now deployed for secure API server to node/pod/service communication. ### Changed diff --git a/bootstrapper/cmd/bootstrapper/main.go b/bootstrapper/cmd/bootstrapper/main.go index 5a3fe68c7..4019e0480 100644 --- a/bootstrapper/cmd/bootstrapper/main.go +++ b/bootstrapper/cmd/bootstrapper/main.go @@ -11,7 +11,6 @@ import ( "encoding/json" "flag" "io" - "net" "os" "strconv" @@ -34,10 +33,8 @@ import ( "github.com/edgelesssys/constellation/internal/cloud/vmtype" "github.com/edgelesssys/constellation/internal/constants" "github.com/edgelesssys/constellation/internal/file" - "github.com/edgelesssys/constellation/internal/iproute" "github.com/edgelesssys/constellation/internal/logger" "github.com/edgelesssys/constellation/internal/oid" - "github.com/edgelesssys/constellation/internal/role" "github.com/spf13/afero" "go.uber.org/zap" ) @@ -107,9 +104,6 @@ func main() { ) openTPM = vtpm.OpenVTPM fs = afero.NewOsFs() - if err := setLoadbalancerRoute(ctx, metadata); err != nil { - log.With(zap.Error(err)).Fatalf("Failed to set loadbalancer route") - } log.Infof("Added load balancer IP to routing table") case cloudprovider.Azure: pcrs, err := vtpm.GetSelectedPCRs(vtpm.OpenVTPM, vtpm.AzurePCRSelection) @@ -181,22 +175,3 @@ func main() { run(issuer, openTPM, fileHandler, clusterInitJoiner, metadataAPI, bindIP, bindPort, log, cloudLogger) } - -func setLoadbalancerRoute(ctx context.Context, meta metadataAPI) error { - self, err := meta.Self(ctx) - if err != nil { - return err - } - if self.Role != role.ControlPlane { - return nil - } - endpoint, err := 
meta.GetLoadBalancerEndpoint(ctx) - if err != nil { - return err - } - ip, _, err := net.SplitHostPort(endpoint) - if err != nil { - return err - } - return iproute.AddToLocalRoutingTable(ctx, ip) -} diff --git a/bootstrapper/internal/certificate/certificate.go b/bootstrapper/internal/certificate/certificate.go new file mode 100644 index 000000000..b5f90c973 --- /dev/null +++ b/bootstrapper/internal/certificate/certificate.go @@ -0,0 +1,37 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package certificate + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/pem" +) + +// GetCertificateRequest returns a certificate request and matching private key. +func GetCertificateRequest(csrTemplate *x509.CertificateRequest) (certificateRequest []byte, privateKey []byte, err error) { + privK, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, nil, err + } + keyBytes, err := x509.MarshalECPrivateKey(privK) + if err != nil { + return nil, nil, err + } + keyPem := pem.EncodeToMemory(&pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: keyBytes, + }) + certificateRequest, err = x509.CreateCertificateRequest(rand.Reader, csrTemplate, privK) + if err != nil { + return nil, nil, err + } + + return certificateRequest, keyPem, nil +} diff --git a/bootstrapper/internal/kubelet/kubelet.go b/bootstrapper/internal/kubelet/kubelet.go index b5a7b8ce3..e04f4cfb6 100644 --- a/bootstrapper/internal/kubelet/kubelet.go +++ b/bootstrapper/internal/kubelet/kubelet.go @@ -7,14 +7,11 @@ SPDX-License-Identifier: AGPL-3.0-only package kubelet import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" "crypto/x509" "crypto/x509/pkix" - "encoding/pem" "net" + "github.com/edgelesssys/constellation/bootstrapper/internal/certificate" "k8s.io/kubernetes/cmd/kubeadm/app/constants" ) @@ -27,18 +24,6 @@ const ( // GetCertificateRequest returns a certificate request and macthing private key for 
the kubelet. func GetCertificateRequest(nodeName string, ips []net.IP) (certificateRequest []byte, privateKey []byte, err error) { - privK, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return nil, nil, err - } - keyBytes, err := x509.MarshalECPrivateKey(privK) - if err != nil { - return nil, nil, err - } - kubeletKey := pem.EncodeToMemory(&pem.Block{ - Type: "EC PRIVATE KEY", - Bytes: keyBytes, - }) csrTemplate := &x509.CertificateRequest{ Subject: pkix.Name{ Organization: []string{constants.NodesGroup}, @@ -46,10 +31,5 @@ func GetCertificateRequest(nodeName string, ips []net.IP) (certificateRequest [] }, IPAddresses: ips, } - certificateRequest, err = x509.CreateCertificateRequest(rand.Reader, csrTemplate, privK) - if err != nil { - return nil, nil, err - } - - return certificateRequest, kubeletKey, nil + return certificate.GetCertificateRequest(csrTemplate) } diff --git a/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go b/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go index 2bcfb4922..59607da04 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go +++ b/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go @@ -66,12 +66,13 @@ func (c *CoreOSConfiguration) InitConfiguration(externalCloudProvider bool, k8sV APIServer: kubeadm.APIServer{ ControlPlaneComponent: kubeadm.ControlPlaneComponent{ ExtraArgs: map[string]string{ - "audit-policy-file": auditPolicyPath, - "audit-log-path": filepath.Join(auditLogDir, auditLogFile), // CIS benchmark - "audit-log-maxage": "30", // CIS benchmark - Default value of Rancher - "audit-log-maxbackup": "10", // CIS benchmark - Default value of Rancher - "audit-log-maxsize": "100", // CIS benchmark - Default value of Rancher - "profiling": "false", // CIS benchmark + "audit-policy-file": auditPolicyPath, + "audit-log-path": filepath.Join(auditLogDir, auditLogFile), // CIS benchmark + "audit-log-maxage": "30", // CIS benchmark - Default value of Rancher + 
"audit-log-maxbackup": "10", // CIS benchmark - Default value of Rancher + "audit-log-maxsize": "100", // CIS benchmark - Default value of Rancher + "profiling": "false", // CIS benchmark + "egress-selector-config-file": "/etc/kubernetes/egress-selector-configuration.yaml", "kubelet-certificate-authority": filepath.Join( kubeconstants.KubernetesDir, kubeconstants.DefaultCertificateDir, @@ -101,6 +102,20 @@ func (c *CoreOSConfiguration) InitConfiguration(externalCloudProvider bool, k8sV ReadOnly: true, PathType: corev1.HostPathFile, }, + { + Name: "egress-config", + HostPath: "/etc/kubernetes/egress-selector-configuration.yaml", + MountPath: "/etc/kubernetes/egress-selector-configuration.yaml", + ReadOnly: true, + PathType: corev1.HostPathFile, + }, + { + Name: "konnectivity-uds", + HostPath: "/etc/kubernetes/konnectivity-server", + MountPath: "/etc/kubernetes/konnectivity-server", + ReadOnly: false, + PathType: corev1.HostPathDirectoryOrCreate, + }, }, }, CertSANs: []string{"127.0.0.1"}, @@ -133,6 +148,7 @@ func (c *CoreOSConfiguration) InitConfiguration(externalCloudProvider bool, k8sV "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", }, // CIS benchmark + StaticPodPath: "/etc/kubernetes/manifests", TypeMeta: metav1.TypeMeta{ APIVersion: kubeletconf.SchemeGroupVersion.String(), Kind: "KubeletConfiguration", diff --git a/bootstrapper/internal/kubernetes/k8sapi/resources/konnectivity.go b/bootstrapper/internal/kubernetes/k8sapi/resources/konnectivity.go new file mode 100644 index 000000000..a2713701c --- /dev/null +++ b/bootstrapper/internal/kubernetes/k8sapi/resources/konnectivity.go @@ -0,0 +1,380 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package resources + +import ( + "crypto/x509" + "crypto/x509/pkix" + + "github.com/edgelesssys/constellation/bootstrapper/internal/certificate" + "github.com/edgelesssys/constellation/internal/kubernetes" + 
"github.com/edgelesssys/constellation/internal/versions" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apiserver/pkg/apis/apiserver" +) + +const ( + // KonnectivityCertificateFilename is the path to the kubelets certificate. + KonnectivityCertificateFilename = "/etc/kubernetes/konnectivity.crt" + // KonnectivityKeyFilename is the path to the kubelets private key. + KonnectivityKeyFilename = "/etc/kubernetes/konnectivity.key" +) + +type konnectivityAgents struct { + DaemonSet appsv1.DaemonSet + ClusterRoleBinding rbacv1.ClusterRoleBinding + ServiceAccount corev1.ServiceAccount +} + +type konnectivityServerStaticPod struct { + StaticPod corev1.Pod +} + +type egressSelectorConfiguration struct { + EgressSelectorConfiguration apiserver.EgressSelectorConfiguration +} + +func NewKonnectivityAgents(konnectivityServerAddress string) *konnectivityAgents { + return &konnectivityAgents{ + DaemonSet: appsv1.DaemonSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "DaemonSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "konnectivity-agent", + Namespace: "kube-system", + Labels: map[string]string{ + "k8s-app": "konnectivity-agent", + "addonmanager.kubernetes.io/mode": "Reconcile", + }, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "k8s-app": "konnectivity-agent", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "k8s-app": "konnectivity-agent", + }, + }, + Spec: corev1.PodSpec{ + PriorityClassName: "system-cluster-critical", + Tolerations: []corev1.Toleration{ + { + Key: "node-role.kubernetes.io/master", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + { + Key: "node-role.kubernetes.io/control-plane", + Operator: corev1.TolerationOpExists, + Effect: 
corev1.TaintEffectNoSchedule, + }, + { + Key: "CriticalAddonsOnly", + Operator: corev1.TolerationOpExists, + }, + { + Key: "node.kubernetes.io/not-ready", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoExecute, + }, + }, + Containers: []corev1.Container{ + { + Name: "konnectivity-agent", + Image: versions.KonnectivityAgentImage, + Command: []string{ + "/proxy-agent", + }, + Args: []string{ + "--logtostderr=true", + "--proxy-server-host=" + konnectivityServerAddress, + "--ca-cert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", + "--proxy-server-port=8132", + "--admin-server-port=8133", + "--health-server-port=8134", + "--service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token", + "--agent-identifiers=host=$(HOST_IP)", + // we will be able to avoid constant polling when either one is done: + // https://github.com/kubernetes-sigs/apiserver-network-proxy/issues/358 + // https://github.com/kubernetes-sigs/apiserver-network-proxy/issues/273 + "--sync-forever=true", + // Ensure stable connection to the konnectivity server. 
+ "--keepalive-time=60s", + "--sync-interval=1s", + "--sync-interval-cap=3s", + "--probe-interval=1s", + "--v=3", + }, + Env: []corev1.EnvVar{ + { + Name: "HOST_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "status.hostIP", + }, + }, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "konnectivity-agent-token", + MountPath: "/var/run/secrets/tokens", + ReadOnly: true, + }, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Port: intstr.FromInt(8134), + }, + }, + InitialDelaySeconds: 15, + TimeoutSeconds: 15, + }, + }, + }, + ServiceAccountName: "konnectivity-agent", + Volumes: []corev1.Volume{ + { + Name: "konnectivity-agent-token", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + ServiceAccountToken: &corev1.ServiceAccountTokenProjection{ + Audience: "system:konnectivity-server", + Path: "konnectivity-agent-token", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + ClusterRoleBinding: rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRoleBinding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "system:konnectivity-server", + Labels: map[string]string{ + "kubernetes.io/cluster-service": "true", + "addonmanager.kubernetes.io/mode": "Reconcile", + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "system:auth-delegator", + }, + Subjects: []rbacv1.Subject{ + { + APIGroup: "rbac.authorization.k8s.io", + Kind: "User", + Name: "system:konnectivity-server", + }, + }, + }, + ServiceAccount: corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "konnectivity-agent", + Namespace: "kube-system", + Labels: map[string]string{ + 
"kubernetes.io/cluster-service": "true", + "addonmanager.kubernetes.io/mode": "Reconcile", + }, + }, + }, + } +} + +func NewKonnectivityServerStaticPod() *konnectivityServerStaticPod { + udsHostPathType := corev1.HostPathDirectoryOrCreate + return &konnectivityServerStaticPod{ + StaticPod: corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "konnectivity-server", + Namespace: "kube-system", + }, + Spec: corev1.PodSpec{ + PriorityClassName: "system-cluster-critical", + HostNetwork: true, + Containers: []corev1.Container{ + { + Name: "konnectivity-server-container", + Image: versions.KonnectivityServerImage, + Command: []string{"/proxy-server"}, + Args: []string{ + "--logtostderr=true", + // This needs to be consistent with the value set in egressSelectorConfiguration. + "--uds-name=/etc/kubernetes/konnectivity-server/konnectivity-server.socket", + // The following two lines assume the Konnectivity server is + // deployed on the same machine as the apiserver, and the certs and + // key of the API Server are at the specified location. + "--cluster-cert=/etc/kubernetes/pki/apiserver.crt", + "--cluster-key=/etc/kubernetes/pki/apiserver.key", + // This needs to be consistent with the value set in egressSelectorConfiguration. 
+ "--mode=grpc", + "--server-port=0", + "--agent-port=8132", + "--admin-port=8133", + "--health-port=8134", + "--v=5", + "--agent-namespace=kube-system", + "--agent-service-account=konnectivity-agent", + "--kubeconfig=/etc/kubernetes/konnectivity-server.conf", + "--authentication-audience=system:konnectivity-server", + "--proxy-strategies=destHost", + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Port: intstr.FromInt(8134), + }, + }, + InitialDelaySeconds: 30, + TimeoutSeconds: 60, + }, + Ports: []corev1.ContainerPort{ + { + Name: "agent-port", + ContainerPort: 8132, + HostPort: 8132, + }, + { + Name: "admin-port", + ContainerPort: 8133, + HostPort: 8133, + }, + { + Name: "health-port", + ContainerPort: 8134, + HostPort: 8134, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "k8s-certs", + MountPath: "/etc/kubernetes/pki", + ReadOnly: true, + }, + { + Name: "kubeconfig", + MountPath: "/etc/kubernetes/konnectivity-server.conf", + ReadOnly: true, + }, + { + Name: "konnectivity-uds", + MountPath: "/etc/kubernetes/konnectivity-server", + ReadOnly: false, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "k8s-certs", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/etc/kubernetes/pki", + }, + }, + }, + { + Name: "kubeconfig", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/etc/kubernetes/konnectivity-server.conf", + }, + }, + }, + { + Name: "konnectivity-uds", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/etc/kubernetes/konnectivity-server", + Type: &udsHostPathType, + }, + }, + }, + }, + }, + }, + } +} + +func NewEgressSelectorConfiguration() *egressSelectorConfiguration { + return &egressSelectorConfiguration{ + EgressSelectorConfiguration: apiserver.EgressSelectorConfiguration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: 
"apiserver.k8s.io/v1beta1", + Kind: "EgressSelectorConfiguration", + }, + EgressSelections: []apiserver.EgressSelection{ + { + Name: "cluster", + Connection: apiserver.Connection{ + ProxyProtocol: "GRPC", + Transport: &apiserver.Transport{ + UDS: &apiserver.UDSTransport{ + UDSName: "/etc/kubernetes/konnectivity-server/konnectivity-server.socket", + }, + }, + }, + }, + }, + }, + } +} + +func (v *konnectivityAgents) Marshal() ([]byte, error) { + return kubernetes.MarshalK8SResources(v) +} + +func (v *konnectivityServerStaticPod) Marshal() ([]byte, error) { + return kubernetes.MarshalK8SResources(v) +} + +func (v *egressSelectorConfiguration) Marshal() ([]byte, error) { + return kubernetes.MarshalK8SResources(v) +} + +// GetCertificateRequest returns a certificate request and matching private key for the konnectivity server. +func GetKonnectivityCertificateRequest() (certificateRequest []byte, privateKey []byte, err error) { + csrTemplate := &x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: "system:konnectivity-server", + }, + } + return certificate.GetCertificateRequest(csrTemplate) +} diff --git a/bootstrapper/internal/kubernetes/k8sapi/resources/konnectivity_test.go b/bootstrapper/internal/kubernetes/k8sapi/resources/konnectivity_test.go new file mode 100644 index 000000000..3f006994c --- /dev/null +++ b/bootstrapper/internal/kubernetes/k8sapi/resources/konnectivity_test.go @@ -0,0 +1,28 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package resources + +import ( + "testing" + + "github.com/edgelesssys/constellation/internal/kubernetes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestKonnectivityMarshalUnmarshal(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + + kmsDepl := NewKonnectivityAgents("192.168.2.1") + data, err := kmsDepl.Marshal() + require.NoError(err) + + var recreated konnectivityAgents + 
require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated)) + assert.Equal(kmsDepl, &recreated) +} diff --git a/bootstrapper/internal/kubernetes/k8sapi/util.go b/bootstrapper/internal/kubernetes/k8sapi/util.go index c357b998d..3c325ee05 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/util.go +++ b/bootstrapper/internal/kubernetes/k8sapi/util.go @@ -28,6 +28,7 @@ import ( "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources" "github.com/edgelesssys/constellation/internal/constants" "github.com/edgelesssys/constellation/internal/kubernetes" + "github.com/edgelesssys/constellation/internal/role" kubeconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "github.com/edgelesssys/constellation/internal/crypto" @@ -130,7 +131,7 @@ func (k *KubernetesUtil) InstallComponents(ctx context.Context, version versions } func (k *KubernetesUtil) InitCluster( - ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, log *logger.Logger, + ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, log *logger.Logger, ) error { // TODO: audit policy should be user input auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal() @@ -180,6 +181,11 @@ func (k *KubernetesUtil) InitCluster( return err } + log.Infof("Preparing node for Konnectivity") + if err := k.prepareControlPlaneForKonnectivity(ctx, controlPlaneEndpoint); err != nil { + return fmt.Errorf("setup konnectivity: %w", err) + } + // initialize the cluster log.Infof("Initializing the cluster using kubeadm init") cmd = exec.CommandContext(ctx, kubeadmPath, "init", "-v=5", "--skip-phases=preflight,certs,addon/kube-proxy", "--config", initConfigFile.Name()) @@ -195,6 +201,64 @@ func (k *KubernetesUtil) InitCluster( return nil } +func (k *KubernetesUtil) prepareControlPlaneForKonnectivity(ctx context.Context, loadBalancerEndpoint string) error { + if !strings.Contains(loadBalancerEndpoint, ":") { + loadBalancerEndpoint = 
net.JoinHostPort(loadBalancerEndpoint, strconv.Itoa(constants.KubernetesPort)) + } + + if err := os.MkdirAll("/etc/kubernetes/manifests", os.ModePerm); err != nil { + return fmt.Errorf("creating static pods directory: %w", err) + } + + konnectivityServerYaml, err := resources.NewKonnectivityServerStaticPod().Marshal() + if err != nil { + return fmt.Errorf("generating konnectivity server static pod: %w", err) + } + if err := os.WriteFile("/etc/kubernetes/manifests/konnectivity-server.yaml", konnectivityServerYaml, 0o644); err != nil { + return fmt.Errorf("writing konnectivity server pod: %w", err) + } + + egressConfigYaml, err := resources.NewEgressSelectorConfiguration().Marshal() + if err != nil { + return fmt.Errorf("generating egress selector configuration: %w", err) + } + if err := os.WriteFile("/etc/kubernetes/egress-selector-configuration.yaml", egressConfigYaml, 0o644); err != nil { + return fmt.Errorf("writing egress selector config: %w", err) + } + + if err := k.createSignedKonnectivityCert(); err != nil { + return fmt.Errorf("generating konnectivity server certificate: %w", err) + } + + if out, err := exec.CommandContext(ctx, kubectlPath, "config", "set-credentials", "--kubeconfig", "/etc/kubernetes/konnectivity-server.conf", "system:konnectivity-server", + "--client-certificate", "/etc/kubernetes/konnectivity.crt", "--client-key", "/etc/kubernetes/konnectivity.key", "--embed-certs=true").CombinedOutput(); err != nil { + return fmt.Errorf("konnectivity kubeconfig set-credentials: %w, %s", err, string(out)) + } + if out, err := exec.CommandContext(ctx, kubectlPath, "--kubeconfig", "/etc/kubernetes/konnectivity-server.conf", "config", "set-cluster", "kubernetes", "--server", "https://"+loadBalancerEndpoint, + "--certificate-authority", "/etc/kubernetes/pki/ca.crt", "--embed-certs=true").CombinedOutput(); err != nil { + return fmt.Errorf("konnectivity kubeconfig set-cluster: %w, %s", err, string(out)) + } + if out, err := exec.CommandContext(ctx, 
kubectlPath, "--kubeconfig", "/etc/kubernetes/konnectivity-server.conf", "config", "set-context", "system:konnectivity-server@kubernetes", + "--cluster", "kubernetes", "--user", "system:konnectivity-server").CombinedOutput(); err != nil { + return fmt.Errorf("konnectivity kubeconfig set-context: %w, %s", err, string(out)) + } + if out, err := exec.CommandContext(ctx, kubectlPath, "--kubeconfig", "/etc/kubernetes/konnectivity-server.conf", "config", "use-context", "system:konnectivity-server@kubernetes").CombinedOutput(); err != nil { + return fmt.Errorf("konnectivity kubeconfig use-context: %w, %s", err, string(out)) + } + // cleanup + if err := os.Remove("/etc/kubernetes/konnectivity.crt"); err != nil { + return fmt.Errorf("removing konnectivity certificate: %w", err) + } + if err := os.Remove("/etc/kubernetes/konnectivity.key"); err != nil { + return fmt.Errorf("removing konnectivity key: %w", err) + } + return nil +} + +func (k *KubernetesUtil) SetupKonnectivity(kubectl Client, konnectivityAgentsDaemonSet kubernetes.Marshaler) error { + return kubectl.Apply(konnectivityAgentsDaemonSet, true) +} + func (k *KubernetesUtil) SetupHelmDeployments(ctx context.Context, kubectl Client, helmDeployments []byte, in SetupPodNetworkInput, log *logger.Logger) error { var helmDeploy helm.Deployments if err := json.Unmarshal(helmDeployments, &helmDeploy); err != nil { @@ -450,7 +514,7 @@ func (k *KubernetesUtil) SetupNodeOperator(ctx context.Context, kubectl Client, } // JoinCluster joins existing Kubernetes cluster using kubeadm join. 
-func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log *logger.Logger) error { +func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, log *logger.Logger) error { // TODO: audit policy should be user input auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal() if err != nil { @@ -469,6 +533,13 @@ func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log return fmt.Errorf("writing kubeadm init yaml config %v: %w", joinConfigFile.Name(), err) } + if peerRole == role.ControlPlane { + log.Infof("Prep Init Kubernetes cluster") + if err := k.prepareControlPlaneForKonnectivity(ctx, controlPlaneEndpoint); err != nil { + return fmt.Errorf("setup konnectivity: %w", err) + } + } + // run `kubeadm join` to join a worker node to an existing Kubernetes cluster cmd := exec.CommandContext(ctx, kubeadmPath, "join", "-v=5", "--config", joinConfigFile.Name()) out, err := cmd.CombinedOutput() @@ -497,6 +568,7 @@ func (k *KubernetesUtil) StartKubelet() error { // createSignedKubeletCert manually creates a Kubernetes CA signed kubelet certificate for the bootstrapper node. // This is necessary because this node does not request a certificate from the join service. 
func (k *KubernetesUtil) createSignedKubeletCert(nodeName string, ips []net.IP) error { + // Create CSR certRequestRaw, kubeletKey, err := kubelet.GetCertificateRequest(nodeName, ips) if err != nil { return err @@ -505,48 +577,12 @@ func (k *KubernetesUtil) createSignedKubeletCert(nodeName string, ips []net.IP) return err } - parentCertRaw, err := k.file.Read(filepath.Join( - kubeconstants.KubernetesDir, - kubeconstants.DefaultCertificateDir, - kubeconstants.CACertName, - )) - if err != nil { - return err - } - parentCert, err := crypto.PemToX509Cert(parentCertRaw) - if err != nil { - return err - } - - parentKeyRaw, err := k.file.Read(filepath.Join( - kubeconstants.KubernetesDir, - kubeconstants.DefaultCertificateDir, - kubeconstants.CAKeyName, - )) - if err != nil { - return err - } - parentKeyPEM, _ := pem.Decode(parentKeyRaw) - var parentKey any - switch parentKeyPEM.Type { - case "EC PRIVATE KEY": - parentKey, err = x509.ParseECPrivateKey(parentKeyPEM.Bytes) - case "RSA PRIVATE KEY": - parentKey, err = x509.ParsePKCS1PrivateKey(parentKeyPEM.Bytes) - case "PRIVATE KEY": - parentKey, err = x509.ParsePKCS8PrivateKey(parentKeyPEM.Bytes) - default: - err = fmt.Errorf("unsupported key type %q", parentKeyPEM.Type) - } - if err != nil { - return err - } - certRequest, err := x509.ParseCertificateRequest(certRequestRaw) if err != nil { return err } + // Prepare certificate signing serialNumber, err := crypto.GenerateCertificateSerialNumber() if err != nil { return err @@ -570,10 +606,18 @@ func (k *KubernetesUtil) createSignedKubeletCert(nodeName string, ips []net.IP) IPAddresses: certRequest.IPAddresses, } + parentCert, parentKey, err := k.getKubernetesCACertAndKey() + if err != nil { + return err + } + + // Sign the certificate certRaw, err := x509.CreateCertificate(rand.Reader, certTmpl, parentCert, certRequest.PublicKey, parentKey) if err != nil { return err } + + // Write the certificate kubeletCert := pem.EncodeToMemory(&pem.Block{ Type: "CERTIFICATE", Bytes: 
certRaw, @@ -581,3 +625,97 @@ func (k *KubernetesUtil) createSignedKubeletCert(nodeName string, ips []net.IP) return k.file.Write(kubelet.CertificateFilename, kubeletCert, file.OptMkdirAll) } + +// createSignedKonnectivityCert manually creates a Kubernetes CA signed certificate for the Konnectivity server. +func (k *KubernetesUtil) createSignedKonnectivityCert() error { + // Create CSR + certRequestRaw, keyPem, err := resources.GetKonnectivityCertificateRequest() + if err != nil { + return err + } + if err := k.file.Write(resources.KonnectivityKeyFilename, keyPem, file.OptMkdirAll); err != nil { + return err + } + + certRequest, err := x509.ParseCertificateRequest(certRequestRaw) + if err != nil { + return err + } + + // Prepare certificate signing + serialNumber, err := crypto.GenerateCertificateSerialNumber() + if err != nil { + return err + } + + now := time.Now() + // Create the konnectivity server certificate + // For a reference on the certificate fields, see: https://kubernetes.io/docs/setup/best-practices/certificates/ + certTmpl := &x509.Certificate{ + SerialNumber: serialNumber, + NotBefore: now.Add(-2 * time.Hour), + NotAfter: now.Add(24 * 365 * time.Hour), + Subject: certRequest.Subject, + } + + parentCert, parentKey, err := k.getKubernetesCACertAndKey() + if err != nil { + return err + } + + // Sign the certificate + certRaw, err := x509.CreateCertificate(rand.Reader, certTmpl, parentCert, certRequest.PublicKey, parentKey) + if err != nil { + return err + } + + // Write the certificate + konnectivityCert := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: certRaw, + }) + + return k.file.Write(resources.KonnectivityCertificateFilename, konnectivityCert, file.OptMkdirAll) +} + +// getKubernetesCACertAndKey returns the Kubernetes CA certificate and key. +// The key of type `any` can be consumed by `x509.CreateCertificate()`.
+func (k *KubernetesUtil) getKubernetesCACertAndKey() (*x509.Certificate, any, error) { + parentCertRaw, err := k.file.Read(filepath.Join( + kubeconstants.KubernetesDir, + kubeconstants.DefaultCertificateDir, + kubeconstants.CACertName, + )) + if err != nil { + return nil, nil, err + } + parentCert, err := crypto.PemToX509Cert(parentCertRaw) + if err != nil { + return nil, nil, err + } + + parentKeyRaw, err := k.file.Read(filepath.Join( + kubeconstants.KubernetesDir, + kubeconstants.DefaultCertificateDir, + kubeconstants.CAKeyName, + )) + if err != nil { + return nil, nil, err + } + parentKeyPEM, _ := pem.Decode(parentKeyRaw) + var parentKey any + switch parentKeyPEM.Type { + case "EC PRIVATE KEY": + parentKey, err = x509.ParseECPrivateKey(parentKeyPEM.Bytes) + case "RSA PRIVATE KEY": + parentKey, err = x509.ParsePKCS1PrivateKey(parentKeyPEM.Bytes) + case "PRIVATE KEY": + parentKey, err = x509.ParsePKCS8PrivateKey(parentKeyPEM.Bytes) + default: + err = fmt.Errorf("unsupported key type %q", parentKeyPEM.Type) + } + if err != nil { + return nil, nil, err + } + return parentCert, parentKey, nil +} diff --git a/bootstrapper/internal/kubernetes/k8sutil.go b/bootstrapper/internal/kubernetes/k8sutil.go index 55d97a654..82d4df739 100644 --- a/bootstrapper/internal/kubernetes/k8sutil.go +++ b/bootstrapper/internal/kubernetes/k8sutil.go @@ -13,19 +13,21 @@ import ( "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi" "github.com/edgelesssys/constellation/internal/kubernetes" "github.com/edgelesssys/constellation/internal/logger" + "github.com/edgelesssys/constellation/internal/role" "github.com/edgelesssys/constellation/internal/versions" ) type clusterUtil interface { InstallComponents(ctx context.Context, version versions.ValidK8sVersion) error - InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, log *logger.Logger) error - JoinCluster(ctx context.Context, joinConfig []byte, log *logger.Logger) error + 
InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, log *logger.Logger) error + JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, log *logger.Logger) error SetupHelmDeployments(ctx context.Context, client k8sapi.Client, helmDeployments []byte, in k8sapi.SetupPodNetworkInput, log *logger.Logger) error SetupAccessManager(kubectl k8sapi.Client, sshUsers kubernetes.Marshaler) error SetupAutoscaling(kubectl k8sapi.Client, clusterAutoscalerConfiguration kubernetes.Marshaler, secrets kubernetes.Marshaler) error SetupJoinService(kubectl k8sapi.Client, joinServiceConfiguration kubernetes.Marshaler) error SetupCloudControllerManager(kubectl k8sapi.Client, cloudControllerManagerConfiguration kubernetes.Marshaler, configMaps kubernetes.Marshaler, secrets kubernetes.Marshaler) error SetupCloudNodeManager(kubectl k8sapi.Client, cloudNodeManagerConfiguration kubernetes.Marshaler) error + SetupKonnectivity(kubectl k8sapi.Client, konnectivityAgentsDaemonSet kubernetes.Marshaler) error SetupKMS(kubectl k8sapi.Client, kmsConfiguration kubernetes.Marshaler) error SetupVerificationService(kubectl k8sapi.Client, verificationServiceConfiguration kubernetes.Marshaler) error SetupGCPGuestAgent(kubectl k8sapi.Client, gcpGuestAgentConfiguration kubernetes.Marshaler) error diff --git a/bootstrapper/internal/kubernetes/kubernetes.go b/bootstrapper/internal/kubernetes/kubernetes.go index 16007d0fa..3b8d06c90 100644 --- a/bootstrapper/internal/kubernetes/kubernetes.go +++ b/bootstrapper/internal/kubernetes/kubernetes.go @@ -19,7 +19,6 @@ import ( "github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources" "github.com/edgelesssys/constellation/internal/cloud/metadata" "github.com/edgelesssys/constellation/internal/constants" - "github.com/edgelesssys/constellation/internal/iproute" "github.com/edgelesssys/constellation/internal/logger" 
"github.com/edgelesssys/constellation/internal/role" "github.com/edgelesssys/constellation/internal/versions" @@ -156,7 +155,7 @@ func (k *KubeWrapper) InitCluster( return nil, fmt.Errorf("encoding kubeadm init configuration as YAML: %w", err) } log.Infof("Initializing Kubernetes cluster") - if err := k.clusterUtil.InitCluster(ctx, initConfigYAML, nodeName, validIPs, log); err != nil { + if err := k.clusterUtil.InitCluster(ctx, initConfigYAML, nodeName, validIPs, controlPlaneEndpoint, log); err != nil { return nil, fmt.Errorf("kubeadm init: %w", err) } kubeConfig, err := k.GetKubeconfig() @@ -178,6 +177,19 @@ func (k *KubeWrapper) InitCluster( return nil, fmt.Errorf("setting up pod network: %w", err) } + var controlPlaneIP string + if strings.Contains(controlPlaneEndpoint, ":") { + controlPlaneIP, _, err = net.SplitHostPort(controlPlaneEndpoint) + if err != nil { + return nil, fmt.Errorf("parsing control plane endpoint: %w", err) + } + } else { + controlPlaneIP = controlPlaneEndpoint + } + if err = k.clusterUtil.SetupKonnectivity(k.client, resources.NewKonnectivityAgents(controlPlaneIP)); err != nil { + return nil, fmt.Errorf("setting up konnectivity: %w", err) + } + kms := resources.NewKMSDeployment(k.cloudProvider, kmsConfig) if err = k.clusterUtil.SetupKMS(k.client, kms); err != nil { return nil, fmt.Errorf("setting up kms: %w", err) @@ -277,19 +289,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo zap.String("nodeIP", nodeInternalIP), ).Infof("Setting information for node") - // Step 2: Remove load balancer from local routing table on GCP. 
- if k.cloudProvider == "gcp" { - ip, _, err := net.SplitHostPort(loadbalancerEndpoint) - if err != nil { - return fmt.Errorf("parsing load balancer IP: %w", err) - } - if err := iproute.RemoveFromLocalRoutingTable(ctx, ip); err != nil { - return fmt.Errorf("removing load balancer IP from routing table: %w", err) - } - log.Infof("Removed load balancer IP from routing table") - } - - // Step 3: configure kubeadm join config + // Step 2: configure kubeadm join config joinConfig := k.configProvider.JoinConfiguration(k.cloudControllerManager.Supported()) joinConfig.SetAPIServerEndpoint(args.APIServerEndpoint) joinConfig.SetToken(args.Token) @@ -305,7 +305,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo return fmt.Errorf("encoding kubeadm join configuration as YAML: %w", err) } log.With(zap.String("apiServerEndpoint", args.APIServerEndpoint)).Infof("Joining Kubernetes cluster") - if err := k.clusterUtil.JoinCluster(ctx, joinConfigYAML, log); err != nil { + if err := k.clusterUtil.JoinCluster(ctx, joinConfigYAML, peerRole, loadbalancerEndpoint, log); err != nil { return fmt.Errorf("joining cluster: %v; %w ", string(joinConfigYAML), err) } diff --git a/bootstrapper/internal/kubernetes/kubernetes_test.go b/bootstrapper/internal/kubernetes/kubernetes_test.go index 5c82dd462..6b1815634 100644 --- a/bootstrapper/internal/kubernetes/kubernetes_test.go +++ b/bootstrapper/internal/kubernetes/kubernetes_test.go @@ -263,6 +263,18 @@ func TestInitCluster(t *testing.T) { wantErr: true, k8sVersion: versions.Default, }, + "kubeadm init fails when setting up konnectivity": { + clusterUtil: stubClusterUtil{setupKonnectivityError: someErr}, + kubeconfigReader: &stubKubeconfigReader{ + Kubeconfig: []byte("someKubeconfig"), + }, + providerMetadata: &stubProviderMetadata{SupportedResp: false}, + CloudControllerManager: &stubCloudControllerManager{}, + CloudNodeManager: &stubCloudNodeManager{SupportedResp: false}, + ClusterAutoscaler: 
&stubClusterAutoscaler{}, + wantErr: true, + k8sVersion: versions.Default, + }, "kubeadm init fails when setting up verification service": { clusterUtil: stubClusterUtil{setupVerificationServiceErr: someErr}, kubeconfigReader: &stubKubeconfigReader{ @@ -522,6 +534,7 @@ type stubClusterUtil struct { setupJoinServiceError error setupCloudControllerManagerError error setupCloudNodeManagerError error + setupKonnectivityError error setupKMSError error setupAccessManagerError error setupVerificationServiceErr error @@ -536,11 +549,15 @@ type stubClusterUtil struct { joinConfigs [][]byte } +func (s *stubClusterUtil) SetupKonnectivity(kubectl k8sapi.Client, konnectivityAgentsDaemonSet kubernetes.Marshaler) error { + return s.setupKonnectivityError +} + func (s *stubClusterUtil) InstallComponents(ctx context.Context, version versions.ValidK8sVersion) error { return s.installComponentsErr } -func (s *stubClusterUtil) InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, log *logger.Logger) error { +func (s *stubClusterUtil) InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, log *logger.Logger) error { s.initConfigs = append(s.initConfigs, initConfig) return s.initClusterErr } @@ -593,7 +610,7 @@ func (s *stubClusterUtil) SetupNodeOperator(ctx context.Context, kubectl k8sapi. 
return s.setupNodeOperatorErr } -func (s *stubClusterUtil) JoinCluster(ctx context.Context, joinConfig []byte, log *logger.Logger) error { +func (s *stubClusterUtil) JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, log *logger.Logger) error { s.joinConfigs = append(s.joinConfigs, joinConfig) return s.joinClusterErr } diff --git a/cli/internal/azure/loadbalancer.go b/cli/internal/azure/loadbalancer.go index e4641e481..d397e4ea7 100644 --- a/cli/internal/azure/loadbalancer.go +++ b/cli/internal/azure/loadbalancer.go @@ -30,6 +30,7 @@ const ( verifyHealthProbeName = "verifyHealthProbe" coordHealthProbeName = "coordHealthProbe" debugdHealthProbeName = "debugdHealthProbe" + konnectivityHealthProbeName = "konnectivityHealthProbe" ) // Azure returns a Azure representation of LoadBalancer. @@ -86,6 +87,13 @@ func (l LoadBalancer) Azure() armnetwork.LoadBalancer { Port: to.Ptr[int32](constants.DebugdPort), }, }, + { + Name: to.Ptr(konnectivityHealthProbeName), + Properties: &armnetwork.ProbePropertiesFormat{ + Protocol: to.Ptr(armnetwork.ProbeProtocolTCP), + Port: to.Ptr[int32](constants.KonnectivityPort), + }, + }, }, LoadBalancingRules: []*armnetwork.LoadBalancingRule{ { @@ -175,6 +183,35 @@ func (l LoadBalancer) Azure() armnetwork.LoadBalancer { }, }, }, + { + Name: to.Ptr("konnectivityLoadBalancerRule"), + Properties: &armnetwork.LoadBalancingRulePropertiesFormat{ + FrontendIPConfiguration: &armnetwork.SubResource{ + ID: to.Ptr("/subscriptions/" + l.Subscription + + "/resourceGroups/" + l.ResourceGroup + + "/providers/Microsoft.Network/loadBalancers/" + l.Name + + "/frontendIPConfigurations/" + frontEndIPConfigName), + }, + FrontendPort: to.Ptr[int32](constants.KonnectivityPort), + BackendPort: to.Ptr[int32](constants.KonnectivityPort), + Protocol: to.Ptr(armnetwork.TransportProtocolTCP), + Probe: &armnetwork.SubResource{ + ID: to.Ptr("/subscriptions/" + l.Subscription + + "/resourceGroups/" + l.ResourceGroup + + 
"/providers/Microsoft.Network/loadBalancers/" + l.Name + + "/probes/" + konnectivityHealthProbeName), + }, + DisableOutboundSnat: to.Ptr(true), + BackendAddressPools: []*armnetwork.SubResource{ + { + ID: to.Ptr("/subscriptions/" + l.Subscription + + "/resourceGroups/" + l.ResourceGroup + + "/providers/Microsoft.Network/loadBalancers/" + l.Name + + "/backendAddressPools/" + backEndAddressPoolControlPlaneName), + }, + }, + }, + }, }, OutboundRules: []*armnetwork.OutboundRule{ { diff --git a/cli/internal/cloudcmd/create_test.go b/cli/internal/cloudcmd/create_test.go index 4d05b6972..a48b25804 100644 --- a/cli/internal/cloudcmd/create_test.go +++ b/cli/internal/cloudcmd/create_test.go @@ -40,7 +40,7 @@ func TestCreator(t *testing.T) { GCPSubnetwork: "subnetwork", GCPLoadbalancers: []string{"kube-lb", "boot-lb", "verify-lb"}, GCPFirewalls: []string{ - "bootstrapper", "ssh", "nodeport", "kubernetes", + "bootstrapper", "ssh", "nodeport", "kubernetes", "konnectivity", "allow-cluster-internal-tcp", "allow-cluster-internal-udp", "allow-cluster-internal-icmp", "allow-node-internal-tcp", "allow-node-internal-udp", "allow-node-internal-icmp", }, diff --git a/cli/internal/gcp/client/api.go b/cli/internal/gcp/client/api.go index 7a20c32f2..f3d6319fe 100644 --- a/cli/internal/gcp/client/api.go +++ b/cli/internal/gcp/client/api.go @@ -49,29 +49,37 @@ type firewallsAPI interface { type forwardingRulesAPI interface { Close() error - Delete(ctx context.Context, req *computepb.DeleteForwardingRuleRequest, + Delete(ctx context.Context, req *computepb.DeleteGlobalForwardingRuleRequest, opts ...gax.CallOption) (Operation, error) - Insert(ctx context.Context, req *computepb.InsertForwardingRuleRequest, + Insert(ctx context.Context, req *computepb.InsertGlobalForwardingRuleRequest, opts ...gax.CallOption) (Operation, error) - Get(ctx context.Context, req *computepb.GetForwardingRuleRequest, + Get(ctx context.Context, req *computepb.GetGlobalForwardingRuleRequest, opts ...gax.CallOption) 
(*computepb.ForwardingRule, error) - SetLabels(ctx context.Context, req *computepb.SetLabelsForwardingRuleRequest, + SetLabels(ctx context.Context, req *computepb.SetLabelsGlobalForwardingRuleRequest, opts ...gax.CallOption) (Operation, error) } type backendServicesAPI interface { Close() error - Delete(ctx context.Context, req *computepb.DeleteRegionBackendServiceRequest, + Delete(ctx context.Context, req *computepb.DeleteBackendServiceRequest, opts ...gax.CallOption) (Operation, error) - Insert(ctx context.Context, req *computepb.InsertRegionBackendServiceRequest, + Insert(ctx context.Context, req *computepb.InsertBackendServiceRequest, opts ...gax.CallOption) (Operation, error) } type healthChecksAPI interface { Close() error - Delete(ctx context.Context, req *computepb.DeleteRegionHealthCheckRequest, + Delete(ctx context.Context, req *computepb.DeleteHealthCheckRequest, opts ...gax.CallOption) (Operation, error) - Insert(ctx context.Context, req *computepb.InsertRegionHealthCheckRequest, + Insert(ctx context.Context, req *computepb.InsertHealthCheckRequest, + opts ...gax.CallOption) (Operation, error) +} + +type targetTCPProxiesAPI interface { + Close() error + Delete(ctx context.Context, req *computepb.DeleteTargetTcpProxyRequest, + opts ...gax.CallOption) (Operation, error) + Insert(ctx context.Context, req *computepb.InsertTargetTcpProxyRequest, opts ...gax.CallOption) (Operation, error) } @@ -129,11 +137,11 @@ type projectsAPI interface { type addressesAPI interface { Close() error - Insert(ctx context.Context, req *computepb.InsertAddressRequest, + Insert(ctx context.Context, req *computepb.InsertGlobalAddressRequest, opts ...gax.CallOption) (Operation, error) - Get(ctx context.Context, req *computepb.GetAddressRequest, + Get(ctx context.Context, req *computepb.GetGlobalAddressRequest, opts ...gax.CallOption) (*computepb.Address, error) - Delete(ctx context.Context, req *computepb.DeleteAddressRequest, + Delete(ctx context.Context, req 
*computepb.DeleteGlobalAddressRequest, opts ...gax.CallOption) (Operation, error) } diff --git a/cli/internal/gcp/client/api_test.go b/cli/internal/gcp/client/api_test.go index 08f81ba2e..6af12f0ab 100644 --- a/cli/internal/gcp/client/api_test.go +++ b/cli/internal/gcp/client/api_test.go @@ -231,7 +231,7 @@ func (a stubBackendServicesAPI) Close() error { return nil } -func (a stubBackendServicesAPI) Insert(ctx context.Context, req *computepb.InsertRegionBackendServiceRequest, +func (a stubBackendServicesAPI) Insert(ctx context.Context, req *computepb.InsertBackendServiceRequest, opts ...gax.CallOption, ) (Operation, error) { if a.insertErr != nil { @@ -239,13 +239,12 @@ func (a stubBackendServicesAPI) Insert(ctx context.Context, req *computepb.Inser } return &stubOperation{ &computepb.Operation{ - Name: proto.String("name"), - Region: proto.String("region"), + Name: proto.String("name"), }, }, nil } -func (a stubBackendServicesAPI) Delete(ctx context.Context, req *computepb.DeleteRegionBackendServiceRequest, +func (a stubBackendServicesAPI) Delete(ctx context.Context, req *computepb.DeleteBackendServiceRequest, opts ...gax.CallOption, ) (Operation, error) { if a.deleteErr != nil { @@ -253,8 +252,42 @@ func (a stubBackendServicesAPI) Delete(ctx context.Context, req *computepb.Delet } return &stubOperation{ &computepb.Operation{ - Name: proto.String("name"), - Region: proto.String("region"), + Name: proto.String("name"), + }, + }, nil +} + +type stubTargetTCPProxiesAPI struct { + insertErr error + deleteErr error +} + +func (a stubTargetTCPProxiesAPI) Close() error { + return nil +} + +func (a stubTargetTCPProxiesAPI) Insert(ctx context.Context, req *computepb.InsertTargetTcpProxyRequest, + opts ...gax.CallOption, +) (Operation, error) { + if a.insertErr != nil { + return nil, a.insertErr + } + return &stubOperation{ + &computepb.Operation{ + Name: proto.String("name"), + }, + }, nil +} + +func (a stubTargetTCPProxiesAPI) Delete(ctx context.Context, req 
*computepb.DeleteTargetTcpProxyRequest, + opts ...gax.CallOption, +) (Operation, error) { + if a.deleteErr != nil { + return nil, a.deleteErr + } + return &stubOperation{ + &computepb.Operation{ + Name: proto.String("name"), }, }, nil } @@ -271,7 +304,7 @@ func (a stubForwardingRulesAPI) Close() error { return nil } -func (a stubForwardingRulesAPI) Insert(ctx context.Context, req *computepb.InsertForwardingRuleRequest, +func (a stubForwardingRulesAPI) Insert(ctx context.Context, req *computepb.InsertGlobalForwardingRuleRequest, opts ...gax.CallOption, ) (Operation, error) { if a.insertErr != nil { @@ -279,13 +312,12 @@ func (a stubForwardingRulesAPI) Insert(ctx context.Context, req *computepb.Inser } return &stubOperation{ &computepb.Operation{ - Name: proto.String("name"), - Region: proto.String("region"), + Name: proto.String("name"), }, }, nil } -func (a stubForwardingRulesAPI) Delete(ctx context.Context, req *computepb.DeleteForwardingRuleRequest, +func (a stubForwardingRulesAPI) Delete(ctx context.Context, req *computepb.DeleteGlobalForwardingRuleRequest, opts ...gax.CallOption, ) (Operation, error) { if a.deleteErr != nil { @@ -293,13 +325,12 @@ func (a stubForwardingRulesAPI) Delete(ctx context.Context, req *computepb.Delet } return &stubOperation{ &computepb.Operation{ - Name: proto.String("name"), - Region: proto.String("region"), + Name: proto.String("name"), }, }, nil } -func (a stubForwardingRulesAPI) Get(ctx context.Context, req *computepb.GetForwardingRuleRequest, +func (a stubForwardingRulesAPI) Get(ctx context.Context, req *computepb.GetGlobalForwardingRuleRequest, opts ...gax.CallOption, ) (*computepb.ForwardingRule, error) { if a.getErr != nil { @@ -308,7 +339,7 @@ func (a stubForwardingRulesAPI) Get(ctx context.Context, req *computepb.GetForwa return a.forwardingRule, nil } -func (a stubForwardingRulesAPI) SetLabels(ctx context.Context, req *computepb.SetLabelsForwardingRuleRequest, +func (a stubForwardingRulesAPI) SetLabels(ctx context.Context, 
req *computepb.SetLabelsGlobalForwardingRuleRequest, opts ...gax.CallOption, ) (Operation, error) { if a.deleteErr != nil { @@ -316,8 +347,7 @@ func (a stubForwardingRulesAPI) SetLabels(ctx context.Context, req *computepb.Se } return &stubOperation{ &computepb.Operation{ - Name: proto.String("name"), - Region: proto.String("region"), + Name: proto.String("name"), }, }, nil } @@ -331,7 +361,7 @@ func (a stubHealthChecksAPI) Close() error { return nil } -func (a stubHealthChecksAPI) Insert(ctx context.Context, req *computepb.InsertRegionHealthCheckRequest, +func (a stubHealthChecksAPI) Insert(ctx context.Context, req *computepb.InsertHealthCheckRequest, opts ...gax.CallOption, ) (Operation, error) { if a.insertErr != nil { @@ -339,13 +369,12 @@ func (a stubHealthChecksAPI) Insert(ctx context.Context, req *computepb.InsertRe } return &stubOperation{ &computepb.Operation{ - Name: proto.String("name"), - Region: proto.String("region"), + Name: proto.String("name"), }, }, nil } -func (a stubHealthChecksAPI) Delete(ctx context.Context, req *computepb.DeleteRegionHealthCheckRequest, +func (a stubHealthChecksAPI) Delete(ctx context.Context, req *computepb.DeleteHealthCheckRequest, opts ...gax.CallOption, ) (Operation, error) { if a.deleteErr != nil { @@ -353,8 +382,7 @@ func (a stubHealthChecksAPI) Delete(ctx context.Context, req *computepb.DeleteRe } return &stubOperation{ &computepb.Operation{ - Name: proto.String("name"), - Region: proto.String("region"), + Name: proto.String("name"), }, }, nil } @@ -511,7 +539,7 @@ type stubAddressesAPI struct { deleteErr error } -func (a stubAddressesAPI) Insert(context.Context, *computepb.InsertAddressRequest, +func (a stubAddressesAPI) Insert(context.Context, *computepb.InsertGlobalAddressRequest, ...gax.CallOption, ) (Operation, error) { if a.insertErr != nil { @@ -519,19 +547,18 @@ func (a stubAddressesAPI) Insert(context.Context, *computepb.InsertAddressReques } return &stubOperation{ &computepb.Operation{ - Name: 
proto.String("name"), - Region: proto.String("region"), + Name: proto.String("name"), }, }, nil } -func (a stubAddressesAPI) Get(ctx context.Context, req *computepb.GetAddressRequest, +func (a stubAddressesAPI) Get(ctx context.Context, req *computepb.GetGlobalAddressRequest, opts ...gax.CallOption, ) (*computepb.Address, error) { return &computepb.Address{Address: a.getAddr}, a.getErr } -func (a stubAddressesAPI) Delete(context.Context, *computepb.DeleteAddressRequest, +func (a stubAddressesAPI) Delete(context.Context, *computepb.DeleteGlobalAddressRequest, ...gax.CallOption, ) (Operation, error) { if a.deleteErr != nil { @@ -539,8 +566,7 @@ func (a stubAddressesAPI) Delete(context.Context, *computepb.DeleteAddressReques } return &stubOperation{ &computepb.Operation{ - Name: proto.String("name"), - Region: proto.String("region"), + Name: proto.String("name"), }, }, nil } diff --git a/cli/internal/gcp/client/client.go b/cli/internal/gcp/client/client.go index 654fac58e..fe9960e8c 100644 --- a/cli/internal/gcp/client/client.go +++ b/cli/internal/gcp/client/client.go @@ -36,6 +36,7 @@ type Client struct { forwardingRulesAPI backendServicesAPI healthChecksAPI + targetTCPProxiesAPI instanceTemplateAPI instanceGroupManagersAPI iamAPI @@ -109,25 +110,31 @@ func NewFromDefault(ctx context.Context) (*Client, error) { return nil, err } closers = append(closers, subnetAPI) - forwardingRulesAPI, err := compute.NewForwardingRulesRESTClient(ctx) + forwardingRulesAPI, err := compute.NewGlobalForwardingRulesRESTClient(ctx) if err != nil { _ = closeAll(closers) return nil, err } closers = append(closers, forwardingRulesAPI) - backendServicesAPI, err := compute.NewRegionBackendServicesRESTClient(ctx) + backendServicesAPI, err := compute.NewBackendServicesRESTClient(ctx) if err != nil { _ = closeAll(closers) return nil, err } closers = append(closers, backendServicesAPI) + targetTCPProxiesAPI, err := compute.NewTargetTcpProxiesRESTClient(ctx) + if err != nil { + _ = closeAll(closers) 
+ return nil, err + } + closers = append(closers, targetTCPProxiesAPI) targetPoolsAPI, err := compute.NewTargetPoolsRESTClient(ctx) if err != nil { _ = closeAll(closers) return nil, err } closers = append(closers, targetPoolsAPI) - healthChecksAPI, err := compute.NewRegionHealthChecksRESTClient(ctx) + healthChecksAPI, err := compute.NewHealthChecksRESTClient(ctx) if err != nil { _ = closeAll(closers) return nil, err @@ -157,7 +164,7 @@ func NewFromDefault(ctx context.Context) (*Client, error) { return nil, err } closers = append(closers, projectsAPI) - addressesAPI, err := compute.NewAddressesRESTClient(ctx) + addressesAPI, err := compute.NewGlobalAddressesRESTClient(ctx) if err != nil { _ = closeAll(closers) return nil, err @@ -173,6 +180,7 @@ func NewFromDefault(ctx context.Context) (*Client, error) { firewallsAPI: &firewallsClient{fwAPI}, forwardingRulesAPI: &forwardingRulesClient{forwardingRulesAPI}, backendServicesAPI: &backendServicesClient{backendServicesAPI}, + targetTCPProxiesAPI: &targetTCPProxiesClient{targetTCPProxiesAPI}, healthChecksAPI: &healthChecksClient{healthChecksAPI}, instanceTemplateAPI: &instanceTemplateClient{templAPI}, instanceGroupManagersAPI: &instanceGroupManagersClient{groupAPI}, @@ -282,6 +290,7 @@ func (c *Client) SetState(stat state.ConstellationState) { hasForwardingRules: true, hasBackendService: true, hasHealthCheck: true, + hasTargetTCPProxy: true, } c.loadbalancers = append(c.loadbalancers, lb) } diff --git a/cli/internal/gcp/client/gcpwrappers.go b/cli/internal/gcp/client/gcpwrappers.go index 5be89b348..eb0bc2860 100644 --- a/cli/internal/gcp/client/gcpwrappers.go +++ b/cli/internal/gcp/client/gcpwrappers.go @@ -53,75 +53,95 @@ func (c *firewallsClient) Insert(ctx context.Context, req *computepb.InsertFirew } type forwardingRulesClient struct { - *compute.ForwardingRulesClient + *compute.GlobalForwardingRulesClient } func (c *forwardingRulesClient) Close() error { - return c.ForwardingRulesClient.Close() + return 
c.GlobalForwardingRulesClient.Close() } -func (c *forwardingRulesClient) Delete(ctx context.Context, req *computepb.DeleteForwardingRuleRequest, +func (c *forwardingRulesClient) Delete(ctx context.Context, req *computepb.DeleteGlobalForwardingRuleRequest, opts ...gax.CallOption, ) (Operation, error) { - return c.ForwardingRulesClient.Delete(ctx, req) + return c.GlobalForwardingRulesClient.Delete(ctx, req) } -func (c *forwardingRulesClient) Insert(ctx context.Context, req *computepb.InsertForwardingRuleRequest, +func (c *forwardingRulesClient) Insert(ctx context.Context, req *computepb.InsertGlobalForwardingRuleRequest, opts ...gax.CallOption, ) (Operation, error) { - return c.ForwardingRulesClient.Insert(ctx, req) + return c.GlobalForwardingRulesClient.Insert(ctx, req) } -func (c *forwardingRulesClient) Get(ctx context.Context, req *computepb.GetForwardingRuleRequest, +func (c *forwardingRulesClient) Get(ctx context.Context, req *computepb.GetGlobalForwardingRuleRequest, opts ...gax.CallOption, ) (*computepb.ForwardingRule, error) { - return c.ForwardingRulesClient.Get(ctx, req) + return c.GlobalForwardingRulesClient.Get(ctx, req) } -func (c *forwardingRulesClient) SetLabels(ctx context.Context, req *computepb.SetLabelsForwardingRuleRequest, +func (c *forwardingRulesClient) SetLabels(ctx context.Context, req *computepb.SetLabelsGlobalForwardingRuleRequest, opts ...gax.CallOption, ) (Operation, error) { - return c.ForwardingRulesClient.SetLabels(ctx, req) + return c.GlobalForwardingRulesClient.SetLabels(ctx, req) } type backendServicesClient struct { - *compute.RegionBackendServicesClient + *compute.BackendServicesClient } func (c *backendServicesClient) Close() error { - return c.RegionBackendServicesClient.Close() + return c.BackendServicesClient.Close() } -func (c *backendServicesClient) Insert(ctx context.Context, req *computepb.InsertRegionBackendServiceRequest, +func (c *backendServicesClient) Insert(ctx context.Context, req 
*computepb.InsertBackendServiceRequest, opts ...gax.CallOption, ) (Operation, error) { - return c.RegionBackendServicesClient.Insert(ctx, req) + return c.BackendServicesClient.Insert(ctx, req) } -func (c *backendServicesClient) Delete(ctx context.Context, req *computepb.DeleteRegionBackendServiceRequest, +func (c *backendServicesClient) Delete(ctx context.Context, req *computepb.DeleteBackendServiceRequest, opts ...gax.CallOption, ) (Operation, error) { - return c.RegionBackendServicesClient.Delete(ctx, req) + return c.BackendServicesClient.Delete(ctx, req) +} + +type targetTCPProxiesClient struct { + *compute.TargetTcpProxiesClient +} + +func (c *targetTCPProxiesClient) Close() error { + return c.TargetTcpProxiesClient.Close() +} + +func (c *targetTCPProxiesClient) Delete(ctx context.Context, req *computepb.DeleteTargetTcpProxyRequest, + opts ...gax.CallOption, +) (Operation, error) { + return c.TargetTcpProxiesClient.Delete(ctx, req) +} + +func (c *targetTCPProxiesClient) Insert(ctx context.Context, req *computepb.InsertTargetTcpProxyRequest, + opts ...gax.CallOption, +) (Operation, error) { + return c.TargetTcpProxiesClient.Insert(ctx, req) } type healthChecksClient struct { - *compute.RegionHealthChecksClient + *compute.HealthChecksClient } func (c *healthChecksClient) Close() error { - return c.RegionHealthChecksClient.Close() + return c.HealthChecksClient.Close() } -func (c *healthChecksClient) Delete(ctx context.Context, req *computepb.DeleteRegionHealthCheckRequest, +func (c *healthChecksClient) Delete(ctx context.Context, req *computepb.DeleteHealthCheckRequest, opts ...gax.CallOption, ) (Operation, error) { - return c.RegionHealthChecksClient.Delete(ctx, req) + return c.HealthChecksClient.Delete(ctx, req) } -func (c *healthChecksClient) Insert(ctx context.Context, req *computepb.InsertRegionHealthCheckRequest, +func (c *healthChecksClient) Insert(ctx context.Context, req *computepb.InsertHealthCheckRequest, opts ...gax.CallOption, ) (Operation, error) { - 
return c.RegionHealthChecksClient.Insert(ctx, req) + return c.HealthChecksClient.Insert(ctx, req) } type networksClient struct { @@ -257,21 +277,21 @@ func (c *projectsClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPoli } type addressesClient struct { - *compute.AddressesClient + *compute.GlobalAddressesClient } -func (c *addressesClient) Insert(ctx context.Context, req *computepb.InsertAddressRequest, +func (c *addressesClient) Insert(ctx context.Context, req *computepb.InsertGlobalAddressRequest, opts ...gax.CallOption, ) (Operation, error) { - return c.AddressesClient.Insert(ctx, req) + return c.GlobalAddressesClient.Insert(ctx, req) } -func (c *addressesClient) Delete(ctx context.Context, req *computepb.DeleteAddressRequest, +func (c *addressesClient) Delete(ctx context.Context, req *computepb.DeleteGlobalAddressRequest, opts ...gax.CallOption, ) (Operation, error) { - return c.AddressesClient.Delete(ctx, req) + return c.GlobalAddressesClient.Delete(ctx, req) } func (c *addressesClient) Close() error { - return c.AddressesClient.Close() + return c.GlobalAddressesClient.Close() } diff --git a/cli/internal/gcp/client/instances.go b/cli/internal/gcp/client/instances.go index 44db68e6f..0b6f25255 100644 --- a/cli/internal/gcp/client/instances.go +++ b/cli/internal/gcp/client/instances.go @@ -90,6 +90,7 @@ func (c *Client) CreateInstances(ctx context.Context, input CreateInstancesInput {Name: proto.String("debugd"), Port: proto.Int32(constants.DebugdPort)}, {Name: proto.String("bootstrapper"), Port: proto.Int32(constants.BootstrapperPort)}, {Name: proto.String("verify"), Port: proto.Int32(constants.VerifyServiceNodePortGRPC)}, + {Name: proto.String("konnectivity"), Port: proto.Int32(constants.KonnectivityPort)}, }, Template: c.controlPlaneTemplate, UID: c.uid, diff --git a/cli/internal/gcp/client/loadbalancer.go b/cli/internal/gcp/client/loadbalancer.go index 907b6f401..38598cd8e 100644 --- a/cli/internal/gcp/client/loadbalancer.go +++ 
b/cli/internal/gcp/client/loadbalancer.go @@ -32,6 +32,7 @@ type loadBalancer struct { hasHealthCheck bool hasBackendService bool hasForwardingRules bool + hasTargetTCPProxy bool } // CreateLoadBalancers creates all necessary load balancers. @@ -69,6 +70,14 @@ func (c *Client) CreateLoadBalancers(ctx context.Context, isDebugCluster bool) e healthCheck: computepb.HealthCheck_TCP, }) + c.loadbalancers = append(c.loadbalancers, &loadBalancer{ + name: c.buildResourceName("konnectivity"), + ip: c.loadbalancerIPname, + frontendPort: constants.KonnectivityPort, + backendPortName: "konnectivity", + healthCheck: computepb.HealthCheck_TCP, + }) + // Only create when the debug cluster flag is set in the Constellation config if isDebugCluster { c.loadbalancers = append(c.loadbalancers, &loadBalancer{ @@ -107,6 +116,9 @@ func (c *Client) createLoadBalancer(ctx context.Context, lb *loadBalancer) error if err := c.createBackendService(ctx, lb); err != nil { return fmt.Errorf("creating backend services: %w", err) } + if err := c.createTargetTCPProxy(ctx, lb); err != nil { + return fmt.Errorf("creating target TCP proxies: %w", err) + } if err := c.createForwardingRules(ctx, lb); err != nil { return fmt.Errorf("creating forwarding rules: %w", err) } @@ -114,9 +126,8 @@ func (c *Client) createLoadBalancer(ctx context.Context, lb *loadBalancer) error } func (c *Client) createHealthCheck(ctx context.Context, lb *loadBalancer) error { - req := &computepb.InsertRegionHealthCheckRequest{ + req := &computepb.InsertHealthCheckRequest{ Project: c.project, - Region: c.region, HealthCheckResource: &computepb.HealthCheck{ Name: proto.String(lb.name), Type: proto.String(computepb.HealthCheck_Type_name[int32(lb.healthCheck)]), @@ -144,18 +155,17 @@ func (c *Client) createHealthCheck(ctx context.Context, lb *loadBalancer) error } func (c *Client) createBackendService(ctx context.Context, lb *loadBalancer) error { - req := &computepb.InsertRegionBackendServiceRequest{ + req := 
&computepb.InsertBackendServiceRequest{ Project: c.project, - Region: c.region, BackendServiceResource: &computepb.BackendService{ Name: proto.String(lb.name), Protocol: proto.String(computepb.BackendService_Protocol_name[int32(computepb.BackendService_TCP)]), LoadBalancingScheme: proto.String(computepb.BackendService_LoadBalancingScheme_name[int32(computepb.BackendService_EXTERNAL)]), - HealthChecks: []string{c.resourceURI(scopeRegion, "healthChecks", lb.name)}, + HealthChecks: []string{c.resourceURI(scopeGlobal, "healthChecks", lb.name)}, PortName: proto.String(lb.backendPortName), Backends: []*computepb.Backend{ { - BalancingMode: proto.String(computepb.Backend_BalancingMode_name[int32(computepb.Backend_CONNECTION)]), + BalancingMode: proto.String(computepb.Backend_BalancingMode_name[int32(computepb.Backend_UTILIZATION)]), Group: proto.String(c.resourceURI(scopeZone, "instanceGroups", c.controlPlaneInstanceGroup)), }, }, @@ -175,16 +185,16 @@ func (c *Client) createBackendService(ctx context.Context, lb *loadBalancer) err } func (c *Client) createForwardingRules(ctx context.Context, lb *loadBalancer) error { - req := &computepb.InsertForwardingRuleRequest{ + req := &computepb.InsertGlobalForwardingRuleRequest{ Project: c.project, - Region: c.region, ForwardingRuleResource: &computepb.ForwardingRule{ Name: proto.String(lb.name), - IPAddress: proto.String(c.resourceURI(scopeRegion, "addresses", c.loadbalancerIPname)), + IPAddress: proto.String(c.resourceURI(scopeGlobal, "addresses", c.loadbalancerIPname)), IPProtocol: proto.String(computepb.ForwardingRule_IPProtocolEnum_name[int32(computepb.ForwardingRule_TCP)]), LoadBalancingScheme: proto.String(computepb.ForwardingRule_LoadBalancingScheme_name[int32(computepb.ForwardingRule_EXTERNAL)]), - Ports: []string{strconv.Itoa(lb.frontendPort)}, - BackendService: proto.String(c.resourceURI(scopeRegion, "backendServices", lb.name)), + PortRange: proto.String(strconv.Itoa(lb.frontendPort)), + + Target: 
proto.String(c.resourceURI(scopeGlobal, "targetTcpProxies", lb.name)), }, } resp, err := c.forwardingRulesAPI.Insert(ctx, req) @@ -205,9 +215,8 @@ func (c *Client) createForwardingRules(ctx context.Context, lb *loadBalancer) er // labelLoadBalancer labels a load balancer (its forwarding rules) so that it can be found by applications in the cluster. func (c *Client) labelLoadBalancer(ctx context.Context, name string) error { - forwardingRule, err := c.forwardingRulesAPI.Get(ctx, &computepb.GetForwardingRuleRequest{ + forwardingRule, err := c.forwardingRulesAPI.Get(ctx, &computepb.GetGlobalForwardingRuleRequest{ Project: c.project, - Region: c.region, ForwardingRule: name, }) if err != nil { @@ -217,11 +226,10 @@ func (c *Client) labelLoadBalancer(ctx context.Context, name string) error { return fmt.Errorf("forwarding rule %s has no label fingerprint", name) } - resp, err := c.forwardingRulesAPI.SetLabels(ctx, &computepb.SetLabelsForwardingRuleRequest{ + resp, err := c.forwardingRulesAPI.SetLabels(ctx, &computepb.SetLabelsGlobalForwardingRuleRequest{ Project: c.project, - Region: c.region, Resource: name, - RegionSetLabelsRequestResource: &computepb.RegionSetLabelsRequest{ + GlobalSetLabelsRequestResource: &computepb.GlobalSetLabelsRequest{ Labels: map[string]string{"constellation-uid": c.uid}, LabelFingerprint: forwardingRule.LabelFingerprint, }, @@ -233,6 +241,26 @@ func (c *Client) labelLoadBalancer(ctx context.Context, name string) error { return c.waitForOperations(ctx, []Operation{resp}) } +func (c *Client) createTargetTCPProxy(ctx context.Context, lb *loadBalancer) error { + req := &computepb.InsertTargetTcpProxyRequest{ + Project: c.project, + TargetTcpProxyResource: &computepb.TargetTcpProxy{ + Name: proto.String(lb.name), + Service: proto.String(c.resourceURI(scopeGlobal, "backendServices", lb.name)), + }, + } + resp, err := c.targetTCPProxiesAPI.Insert(ctx, req) + if err != nil { + return fmt.Errorf("inserting target tcp proxy: %w", err) + } + + if err := 
c.waitForOperations(ctx, []Operation{resp}); err != nil { + return err + } + lb.hasTargetTCPProxy = true + return nil +} + // TerminateLoadBalancers terminates all load balancers. func (c *Client) TerminateLoadBalancers(ctx context.Context) error { errC := make(chan error) @@ -276,6 +304,12 @@ func (c *Client) terminateLoadBalancer(ctx context.Context, lb *loadBalancer) er } } + if lb.hasTargetTCPProxy { + if err := c.terminateTargetTCPProxy(ctx, lb); err != nil { + return fmt.Errorf("terminating target tcp proxy: %w", err) + } + } + if lb.hasBackendService { if err := c.terminateBackendService(ctx, lb); err != nil { return fmt.Errorf("terminating backend services: %w", err) @@ -293,9 +327,8 @@ func (c *Client) terminateLoadBalancer(ctx context.Context, lb *loadBalancer) er } func (c *Client) terminateForwadingRules(ctx context.Context, lb *loadBalancer) error { - resp, err := c.forwardingRulesAPI.Delete(ctx, &computepb.DeleteForwardingRuleRequest{ + resp, err := c.forwardingRulesAPI.Delete(ctx, &computepb.DeleteGlobalForwardingRuleRequest{ Project: c.project, - Region: c.region, ForwardingRule: lb.name, }) if isNotFoundError(err) { @@ -314,10 +347,30 @@ func (c *Client) terminateForwadingRules(ctx context.Context, lb *loadBalancer) return nil } -func (c *Client) terminateBackendService(ctx context.Context, lb *loadBalancer) error { - resp, err := c.backendServicesAPI.Delete(ctx, &computepb.DeleteRegionBackendServiceRequest{ +func (c *Client) terminateTargetTCPProxy(ctx context.Context, lb *loadBalancer) error { + resp, err := c.targetTCPProxiesAPI.Delete(ctx, &computepb.DeleteTargetTcpProxyRequest{ + Project: c.project, + TargetTcpProxy: lb.name, + }) + if isNotFoundError(err) { + lb.hasTargetTCPProxy = false + return nil + } + if err != nil { + return fmt.Errorf("deleting target tcp proxy: %w", err) + } + + if err := c.waitForOperations(ctx, []Operation{resp}); err != nil { + return err + } + + lb.hasTargetTCPProxy = false + return nil +} + +func (c *Client) 
terminateBackendService(ctx context.Context, lb *loadBalancer) error { + resp, err := c.backendServicesAPI.Delete(ctx, &computepb.DeleteBackendServiceRequest{ Project: c.project, - Region: c.region, BackendService: lb.name, }) if isNotFoundError(err) { @@ -337,9 +390,8 @@ func (c *Client) terminateBackendService(ctx context.Context, lb *loadBalancer) } func (c *Client) terminateHealthCheck(ctx context.Context, lb *loadBalancer) error { - resp, err := c.healthChecksAPI.Delete(ctx, &computepb.DeleteRegionHealthCheckRequest{ + resp, err := c.healthChecksAPI.Delete(ctx, &computepb.DeleteHealthCheckRequest{ Project: c.project, - Region: c.region, HealthCheck: lb.name, }) if isNotFoundError(err) { @@ -360,9 +412,8 @@ func (c *Client) terminateHealthCheck(ctx context.Context, lb *loadBalancer) err func (c *Client) createIPAddr(ctx context.Context) error { ipName := c.buildResourceName() - insertReq := &computepb.InsertAddressRequest{ + insertReq := &computepb.InsertGlobalAddressRequest{ Project: c.project, - Region: c.region, AddressResource: &computepb.Address{ Name: proto.String(ipName), }, @@ -376,9 +427,8 @@ func (c *Client) createIPAddr(ctx context.Context) error { } c.loadbalancerIPname = ipName - getReq := &computepb.GetAddressRequest{ + getReq := &computepb.GetGlobalAddressRequest{ Project: c.project, - Region: c.region, Address: c.loadbalancerIPname, } addr, err := c.addressesAPI.Get(ctx, getReq) @@ -398,9 +448,8 @@ func (c *Client) deleteIPAddr(ctx context.Context) error { return nil } - req := &computepb.DeleteAddressRequest{ + req := &computepb.DeleteGlobalAddressRequest{ Project: c.project, - Region: c.region, Address: c.loadbalancerIPname, } op, err := c.addressesAPI.Delete(ctx, req) diff --git a/cli/internal/gcp/client/loadbalancer_test.go b/cli/internal/gcp/client/loadbalancer_test.go index bcf2ef9e7..3ef9ae876 100644 --- a/cli/internal/gcp/client/loadbalancer_test.go +++ b/cli/internal/gcp/client/loadbalancer_test.go @@ -27,41 +27,55 @@ func 
TestCreateLoadBalancers(t *testing.T) { addrAPI addressesAPI healthAPI healthChecksAPI backendAPI backendServicesAPI + proxyAPI targetTCPProxiesAPI forwardAPI forwardingRulesAPI - opRegAPI operationRegionAPI + operationAPI operationGlobalAPI isDebugCluster bool wantErr bool }{ "successful create": { - addrAPI: &stubAddressesAPI{getAddr: proto.String("192.0.2.1")}, - healthAPI: &stubHealthChecksAPI{}, - backendAPI: &stubBackendServicesAPI{}, - forwardAPI: &stubForwardingRulesAPI{forwardingRule: forwardingRule}, - opRegAPI: stubOperationRegionAPI{}, + addrAPI: &stubAddressesAPI{getAddr: proto.String("192.0.2.1")}, + healthAPI: &stubHealthChecksAPI{}, + backendAPI: &stubBackendServicesAPI{}, + proxyAPI: &stubTargetTCPProxiesAPI{}, + forwardAPI: &stubForwardingRulesAPI{forwardingRule: forwardingRule}, + operationAPI: stubOperationGlobalAPI{}, }, "successful create (debug cluster)": { addrAPI: &stubAddressesAPI{getAddr: proto.String("192.0.2.1")}, healthAPI: &stubHealthChecksAPI{}, backendAPI: &stubBackendServicesAPI{}, + proxyAPI: &stubTargetTCPProxiesAPI{}, forwardAPI: &stubForwardingRulesAPI{forwardingRule: forwardingRule}, - opRegAPI: stubOperationRegionAPI{}, + operationAPI: stubOperationGlobalAPI{}, isDebugCluster: true, }, "createIPAddr fails": { - addrAPI: &stubAddressesAPI{insertErr: someErr}, - healthAPI: &stubHealthChecksAPI{}, - backendAPI: &stubBackendServicesAPI{}, - forwardAPI: &stubForwardingRulesAPI{forwardingRule: forwardingRule}, - opRegAPI: stubOperationRegionAPI{}, - wantErr: true, + addrAPI: &stubAddressesAPI{insertErr: someErr}, + healthAPI: &stubHealthChecksAPI{}, + backendAPI: &stubBackendServicesAPI{}, + proxyAPI: &stubTargetTCPProxiesAPI{}, + forwardAPI: &stubForwardingRulesAPI{forwardingRule: forwardingRule}, + operationAPI: stubOperationGlobalAPI{}, + wantErr: true, }, "createLB fails": { - addrAPI: &stubAddressesAPI{}, - healthAPI: &stubHealthChecksAPI{}, - backendAPI: &stubBackendServicesAPI{insertErr: someErr}, - forwardAPI: 
&stubForwardingRulesAPI{forwardingRule: forwardingRule}, - opRegAPI: stubOperationRegionAPI{}, - wantErr: true, + addrAPI: &stubAddressesAPI{}, + healthAPI: &stubHealthChecksAPI{}, + backendAPI: &stubBackendServicesAPI{insertErr: someErr}, + proxyAPI: &stubTargetTCPProxiesAPI{}, + forwardAPI: &stubForwardingRulesAPI{forwardingRule: forwardingRule}, + operationAPI: stubOperationGlobalAPI{}, + wantErr: true, + }, + "createTcpProxy fails": { + addrAPI: &stubAddressesAPI{getAddr: proto.String("192.0.2.1")}, + healthAPI: &stubHealthChecksAPI{}, + backendAPI: &stubBackendServicesAPI{}, + proxyAPI: &stubTargetTCPProxiesAPI{insertErr: someErr}, + forwardAPI: &stubForwardingRulesAPI{forwardingRule: forwardingRule}, + operationAPI: stubOperationGlobalAPI{}, + wantErr: true, }, } @@ -71,15 +85,16 @@ func TestCreateLoadBalancers(t *testing.T) { ctx := context.Background() client := Client{ - project: "project", - zone: "zone", - name: "name", - uid: "uid", - addressesAPI: tc.addrAPI, - healthChecksAPI: tc.healthAPI, - backendServicesAPI: tc.backendAPI, - forwardingRulesAPI: tc.forwardAPI, - operationRegionAPI: tc.opRegAPI, + project: "project", + zone: "zone", + name: "name", + uid: "uid", + addressesAPI: tc.addrAPI, + targetTCPProxiesAPI: tc.proxyAPI, + healthChecksAPI: tc.healthAPI, + backendServicesAPI: tc.backendAPI, + forwardingRulesAPI: tc.forwardAPI, + operationGlobalAPI: tc.operationAPI, } err := client.CreateLoadBalancers(ctx, tc.isDebugCluster) @@ -104,10 +119,10 @@ func TestCreateLoadBalancers(t *testing.T) { } if tc.isDebugCluster { - assert.Equal(4, len(client.loadbalancers)) + assert.Equal(5, len(client.loadbalancers)) assert.True(foundDebugdLB, "debugd loadbalancer not found in debug-mode") } else { - assert.Equal(3, len(client.loadbalancers)) + assert.Equal(4, len(client.loadbalancers)) assert.False(foundDebugdLB, "debugd loadbalancer found in non-debug mode") } }) @@ -117,110 +132,141 @@ func TestCreateLoadBalancers(t *testing.T) { func 
TestCreateLoadBalancer(t *testing.T) { someErr := errors.New("failed") testCases := map[string]struct { - operationRegionAPI operationRegionAPI - healthChecksAPI healthChecksAPI - backendServicesAPI backendServicesAPI - forwardingRulesAPI forwardingRulesAPI - wantErr bool - wantLB *loadBalancer + operationGlobalAPI operationGlobalAPI + healthChecksAPI healthChecksAPI + backendServicesAPI backendServicesAPI + forwardingRulesAPI forwardingRulesAPI + targetTCPProxiesAPI targetTCPProxiesAPI + wantErr bool + wantLB *loadBalancer }{ "successful create": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")}}, - operationRegionAPI: stubOperationRegionAPI{}, + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")}}, + operationGlobalAPI: stubOperationGlobalAPI{}, wantLB: &loadBalancer{ name: "name", frontendPort: 1234, backendPortName: "testport", hasHealthCheck: true, + hasTargetTCPProxy: true, hasBackendService: true, hasForwardingRules: true, }, }, "successful create with label": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")}}, - operationRegionAPI: stubOperationRegionAPI{}, + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")}}, + operationGlobalAPI: stubOperationGlobalAPI{}, wantLB: &loadBalancer{ name: 
"name", frontendPort: 1234, backendPortName: "testport", label: true, hasHealthCheck: true, + hasTargetTCPProxy: true, hasBackendService: true, hasForwardingRules: true, }, }, "CreateLoadBalancer fails when getting forwarding rule": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{getErr: someErr}, - operationRegionAPI: stubOperationRegionAPI{}, - wantErr: true, + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{getErr: someErr}, + operationGlobalAPI: stubOperationGlobalAPI{}, + wantErr: true, wantLB: &loadBalancer{ name: "name", frontendPort: 1234, backendPortName: "testport", label: true, hasHealthCheck: true, + hasTargetTCPProxy: true, hasBackendService: true, hasForwardingRules: true, }, }, "CreateLoadBalancer fails when label fingerprint is missing": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{}}, - operationRegionAPI: stubOperationRegionAPI{}, - wantErr: true, + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{}}, + operationGlobalAPI: stubOperationGlobalAPI{}, + wantErr: true, wantLB: &loadBalancer{ name: "name", frontendPort: 1234, backendPortName: "testport", label: true, hasHealthCheck: true, + hasTargetTCPProxy: true, hasBackendService: true, hasForwardingRules: true, }, }, "CreateLoadBalancer fails when creating health check": { - healthChecksAPI: stubHealthChecksAPI{insertErr: someErr}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: 
&compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")}}, - operationRegionAPI: stubOperationRegionAPI{}, - wantErr: true, + healthChecksAPI: stubHealthChecksAPI{insertErr: someErr}, + backendServicesAPI: stubBackendServicesAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")}}, + operationGlobalAPI: stubOperationGlobalAPI{}, + wantErr: true, wantLB: &loadBalancer{ name: "name", frontendPort: 1234, backendPortName: "testport", hasHealthCheck: false, + hasTargetTCPProxy: false, hasBackendService: false, hasForwardingRules: false, }, }, "CreateLoadBalancer fails when creating backend service": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{insertErr: someErr}, - forwardingRulesAPI: stubForwardingRulesAPI{}, - operationRegionAPI: stubOperationRegionAPI{}, - wantErr: true, + healthChecksAPI: stubHealthChecksAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + backendServicesAPI: stubBackendServicesAPI{insertErr: someErr}, + forwardingRulesAPI: stubForwardingRulesAPI{}, + operationGlobalAPI: stubOperationGlobalAPI{}, + wantErr: true, wantLB: &loadBalancer{ name: "name", frontendPort: 1234, backendPortName: "testport", hasHealthCheck: true, hasBackendService: false, + hasTargetTCPProxy: false, hasForwardingRules: false, }, }, "CreateLoadBalancer fails when creating forwarding rule": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{insertErr: someErr}, - operationRegionAPI: stubOperationRegionAPI{}, - wantErr: true, + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{insertErr: someErr}, + operationGlobalAPI: stubOperationGlobalAPI{}, + wantErr: true, + wantLB: 
&loadBalancer{ + name: "name", + frontendPort: 1234, + backendPortName: "testport", + hasHealthCheck: true, + hasBackendService: true, + hasTargetTCPProxy: true, + hasForwardingRules: false, + }, + }, + "CreateLoadBalancer fails when creating target proxy rule": { + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{insertErr: someErr}, + forwardingRulesAPI: stubForwardingRulesAPI{}, + operationGlobalAPI: stubOperationGlobalAPI{}, + wantErr: true, wantLB: &loadBalancer{ name: "name", frontendPort: 1234, @@ -231,11 +277,12 @@ func TestCreateLoadBalancer(t *testing.T) { }, }, "CreateLoadBalancer fails when waiting on operation": { - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")}}, - operationRegionAPI: stubOperationRegionAPI{waitErr: someErr}, - wantErr: true, + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{forwardingRule: &compute.ForwardingRule{LabelFingerprint: proto.String("fingerprint")}}, + operationGlobalAPI: stubOperationGlobalAPI{waitErr: someErr}, + wantErr: true, wantLB: &loadBalancer{ name: "name", frontendPort: 1234, @@ -253,14 +300,15 @@ func TestCreateLoadBalancer(t *testing.T) { ctx := context.Background() client := Client{ - project: "project", - zone: "zone", - name: "name", - uid: "uid", - backendServicesAPI: tc.backendServicesAPI, - forwardingRulesAPI: tc.forwardingRulesAPI, - healthChecksAPI: tc.healthChecksAPI, - operationRegionAPI: tc.operationRegionAPI, + project: "project", + zone: "zone", + name: "name", + uid: "uid", + backendServicesAPI: tc.backendServicesAPI, + forwardingRulesAPI: tc.forwardingRulesAPI, + targetTCPProxiesAPI: tc.targetTCPProxiesAPI, + 
healthChecksAPI: tc.healthChecksAPI, + operationGlobalAPI: tc.operationGlobalAPI, } lb := &loadBalancer{ name: tc.wantLB.name, @@ -289,6 +337,7 @@ func TestTerminateLoadbalancers(t *testing.T) { name: "name", hasHealthCheck: true, hasBackendService: true, + hasTargetTCPProxy: true, hasForwardingRules: true, } } @@ -297,31 +346,35 @@ func TestTerminateLoadbalancers(t *testing.T) { addrAPI addressesAPI healthAPI healthChecksAPI backendAPI backendServicesAPI + targetAPI targetTCPProxiesAPI forwardAPI forwardingRulesAPI - opRegionAPI operationRegionAPI + opGlobalAPI operationGlobalAPI wantErr bool }{ "successful terminate": { addrAPI: &stubAddressesAPI{}, healthAPI: &stubHealthChecksAPI{}, backendAPI: &stubBackendServicesAPI{}, + targetAPI: &stubTargetTCPProxiesAPI{}, forwardAPI: &stubForwardingRulesAPI{}, - opRegionAPI: stubOperationRegionAPI{}, + opGlobalAPI: stubOperationGlobalAPI{}, }, "deleteIPAddr fails": { addrAPI: &stubAddressesAPI{deleteErr: someErr}, healthAPI: &stubHealthChecksAPI{}, backendAPI: &stubBackendServicesAPI{}, + targetAPI: &stubTargetTCPProxiesAPI{}, forwardAPI: &stubForwardingRulesAPI{}, - opRegionAPI: stubOperationRegionAPI{}, + opGlobalAPI: stubOperationGlobalAPI{}, wantErr: true, }, "deleteLB fails": { addrAPI: &stubAddressesAPI{}, healthAPI: &stubHealthChecksAPI{}, backendAPI: &stubBackendServicesAPI{deleteErr: someErr}, + targetAPI: &stubTargetTCPProxiesAPI{}, forwardAPI: &stubForwardingRulesAPI{}, - opRegionAPI: stubOperationRegionAPI{}, + opGlobalAPI: stubOperationGlobalAPI{}, wantErr: true, }, } @@ -332,16 +385,17 @@ func TestTerminateLoadbalancers(t *testing.T) { ctx := context.Background() client := Client{ - project: "project", - zone: "zone", - name: "name", - uid: "uid", - addressesAPI: tc.addrAPI, - healthChecksAPI: tc.healthAPI, - backendServicesAPI: tc.backendAPI, - forwardingRulesAPI: tc.forwardAPI, - operationRegionAPI: tc.opRegionAPI, - loadbalancerIPname: "loadbalancerIPid", + project: "project", + zone: "zone", + name: 
"name", + uid: "uid", + addressesAPI: tc.addrAPI, + healthChecksAPI: tc.healthAPI, + backendServicesAPI: tc.backendAPI, + targetTCPProxiesAPI: tc.targetAPI, + forwardingRulesAPI: tc.forwardAPI, + operationGlobalAPI: tc.opGlobalAPI, + loadbalancerIPname: "loadbalancerIPid", loadbalancers: []*loadBalancer{ newRunningLB(), newRunningLB(), @@ -369,27 +423,30 @@ func TestTerminateLoadBalancer(t *testing.T) { return &loadBalancer{ name: "name", hasHealthCheck: true, + hasTargetTCPProxy: true, hasBackendService: true, hasForwardingRules: true, } } testCases := map[string]struct { - lb *loadBalancer - opRegionAPI operationRegionAPI - healthChecksAPI healthChecksAPI - backendServicesAPI backendServicesAPI - forwardingRulesAPI forwardingRulesAPI - wantErr bool - wantLB *loadBalancer + lb *loadBalancer + opGlobalAPI operationGlobalAPI + healthChecksAPI healthChecksAPI + backendServicesAPI backendServicesAPI + targetTCPProxiesAPI targetTCPProxiesAPI + forwardingRulesAPI forwardingRulesAPI + wantErr bool + wantLB *loadBalancer }{ "successful terminate": { - lb: newRunningLB(), - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{}, - opRegionAPI: stubOperationRegionAPI{}, - wantLB: &loadBalancer{}, + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + opGlobalAPI: stubOperationGlobalAPI{}, + wantLB: &loadBalancer{}, }, "terminate partially created loadbalancer": { lb: &loadBalancer{ @@ -398,11 +455,12 @@ func TestTerminateLoadBalancer(t *testing.T) { hasBackendService: true, hasForwardingRules: false, }, - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{deleteErr: someErr}, - opRegionAPI: stubOperationRegionAPI{}, - wantLB: &loadBalancer{}, + healthChecksAPI: 
stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{deleteErr: someErr}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + opGlobalAPI: stubOperationGlobalAPI{}, + wantLB: &loadBalancer{}, }, "terminate partially created loadbalancer 2": { lb: &loadBalancer{ @@ -411,38 +469,42 @@ func TestTerminateLoadBalancer(t *testing.T) { hasBackendService: false, hasForwardingRules: false, }, - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{deleteErr: someErr}, - forwardingRulesAPI: stubForwardingRulesAPI{deleteErr: someErr}, - opRegionAPI: stubOperationRegionAPI{}, - wantLB: &loadBalancer{}, + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{deleteErr: someErr}, + forwardingRulesAPI: stubForwardingRulesAPI{deleteErr: someErr}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + opGlobalAPI: stubOperationGlobalAPI{}, + wantLB: &loadBalancer{}, }, "no-op for nil loadbalancer": { lb: nil, }, "health check not found": { - lb: newRunningLB(), - healthChecksAPI: stubHealthChecksAPI{deleteErr: notFoundErr}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{}, - opRegionAPI: stubOperationRegionAPI{}, - wantLB: &loadBalancer{}, + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{deleteErr: notFoundErr}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + opGlobalAPI: stubOperationGlobalAPI{}, + wantLB: &loadBalancer{}, }, "backend service not found": { - lb: newRunningLB(), - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{deleteErr: notFoundErr}, - forwardingRulesAPI: stubForwardingRulesAPI{}, - opRegionAPI: stubOperationRegionAPI{}, - wantLB: &loadBalancer{}, + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: 
stubBackendServicesAPI{deleteErr: notFoundErr}, + forwardingRulesAPI: stubForwardingRulesAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + opGlobalAPI: stubOperationGlobalAPI{}, + wantLB: &loadBalancer{}, }, "forwarding rules not found": { - lb: newRunningLB(), - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{deleteErr: notFoundErr}, - opRegionAPI: stubOperationRegionAPI{}, - wantLB: &loadBalancer{}, + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{deleteErr: notFoundErr}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + opGlobalAPI: stubOperationGlobalAPI{}, + wantLB: &loadBalancer{}, }, "fails for loadbalancer without name": { lb: &loadBalancer{}, @@ -450,59 +512,83 @@ func TestTerminateLoadBalancer(t *testing.T) { wantLB: &loadBalancer{}, }, "fails when deleting health check": { - lb: newRunningLB(), - healthChecksAPI: stubHealthChecksAPI{deleteErr: someErr}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{}, - opRegionAPI: stubOperationRegionAPI{}, - wantErr: true, + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{deleteErr: someErr}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + opGlobalAPI: stubOperationGlobalAPI{}, + wantErr: true, wantLB: &loadBalancer{ name: "name", hasHealthCheck: true, hasBackendService: false, hasForwardingRules: false, + hasTargetTCPProxy: false, }, }, "fails when deleting backend service": { - lb: newRunningLB(), - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{deleteErr: someErr}, - forwardingRulesAPI: stubForwardingRulesAPI{}, - opRegionAPI: stubOperationRegionAPI{}, - wantErr: true, + lb: newRunningLB(), + healthChecksAPI: 
stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{deleteErr: someErr}, + forwardingRulesAPI: stubForwardingRulesAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + opGlobalAPI: stubOperationGlobalAPI{}, + wantErr: true, wantLB: &loadBalancer{ name: "name", hasHealthCheck: true, hasBackendService: true, hasForwardingRules: false, + hasTargetTCPProxy: false, }, }, "fails when deleting forwarding rule": { - lb: newRunningLB(), - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{deleteErr: someErr}, - opRegionAPI: stubOperationRegionAPI{}, - wantErr: true, + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{deleteErr: someErr}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + opGlobalAPI: stubOperationGlobalAPI{}, + wantErr: true, wantLB: &loadBalancer{ name: "name", hasHealthCheck: true, hasBackendService: true, hasForwardingRules: true, + hasTargetTCPProxy: true, + }, + }, + "fails when deleting tcp proxy rule": { + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: stubForwardingRulesAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{deleteErr: someErr}, + opGlobalAPI: stubOperationGlobalAPI{}, + wantErr: true, + wantLB: &loadBalancer{ + name: "name", + hasHealthCheck: true, + hasBackendService: true, + hasForwardingRules: false, + hasTargetTCPProxy: true, }, }, "fails when waiting on operation": { - lb: newRunningLB(), - healthChecksAPI: stubHealthChecksAPI{}, - backendServicesAPI: stubBackendServicesAPI{}, - forwardingRulesAPI: stubForwardingRulesAPI{}, - opRegionAPI: stubOperationRegionAPI{waitErr: someErr}, - wantErr: true, + lb: newRunningLB(), + healthChecksAPI: stubHealthChecksAPI{}, + backendServicesAPI: stubBackendServicesAPI{}, + forwardingRulesAPI: 
stubForwardingRulesAPI{}, + targetTCPProxiesAPI: stubTargetTCPProxiesAPI{}, + opGlobalAPI: stubOperationGlobalAPI{waitErr: someErr}, + wantErr: true, wantLB: &loadBalancer{ name: "name", hasHealthCheck: true, hasBackendService: true, hasForwardingRules: true, + hasTargetTCPProxy: true, }, }, } @@ -513,14 +599,15 @@ func TestTerminateLoadBalancer(t *testing.T) { ctx := context.Background() client := Client{ - project: "project", - zone: "zone", - name: "name", - uid: "uid", - backendServicesAPI: tc.backendServicesAPI, - forwardingRulesAPI: tc.forwardingRulesAPI, - healthChecksAPI: tc.healthChecksAPI, - operationRegionAPI: tc.opRegionAPI, + project: "project", + zone: "zone", + name: "name", + uid: "uid", + backendServicesAPI: tc.backendServicesAPI, + forwardingRulesAPI: tc.forwardingRulesAPI, + healthChecksAPI: tc.healthChecksAPI, + targetTCPProxiesAPI: tc.targetTCPProxiesAPI, + operationGlobalAPI: tc.opGlobalAPI, } err := client.terminateLoadBalancer(ctx, tc.lb) @@ -541,31 +628,31 @@ func TestCreateIPAddr(t *testing.T) { testCases := map[string]struct { addrAPI addressesAPI - opAPI operationRegionAPI + opAPI operationGlobalAPI wantErr bool }{ "successful create": { addrAPI: stubAddressesAPI{getAddr: proto.String("test-ip")}, - opAPI: stubOperationRegionAPI{}, + opAPI: stubOperationGlobalAPI{}, }, "insert fails": { addrAPI: stubAddressesAPI{insertErr: someErr}, - opAPI: stubOperationRegionAPI{}, + opAPI: stubOperationGlobalAPI{}, wantErr: true, }, "get fails": { addrAPI: stubAddressesAPI{getErr: someErr}, - opAPI: stubOperationRegionAPI{}, + opAPI: stubOperationGlobalAPI{}, wantErr: true, }, "get address nil": { addrAPI: stubAddressesAPI{getAddr: nil}, - opAPI: stubOperationRegionAPI{}, + opAPI: stubOperationGlobalAPI{}, wantErr: true, }, "wait fails": { addrAPI: stubAddressesAPI{}, - opAPI: stubOperationRegionAPI{waitErr: someErr}, + opAPI: stubOperationGlobalAPI{waitErr: someErr}, wantErr: true, }, } @@ -581,7 +668,7 @@ func TestCreateIPAddr(t *testing.T) { name: 
"name", uid: "uid", addressesAPI: tc.addrAPI, - operationRegionAPI: tc.opAPI, + operationGlobalAPI: tc.opAPI, } err := client.createIPAddr(ctx) @@ -603,33 +690,33 @@ func TestDeleteIPAddr(t *testing.T) { testCases := map[string]struct { addrAPI addressesAPI - opAPI operationRegionAPI + opAPI operationGlobalAPI addrID string wantErr bool }{ "successful delete": { addrAPI: stubAddressesAPI{}, - opAPI: stubOperationRegionAPI{}, + opAPI: stubOperationGlobalAPI{}, addrID: "name", }, "not found": { addrAPI: stubAddressesAPI{deleteErr: notFoundErr}, - opAPI: stubOperationRegionAPI{}, + opAPI: stubOperationGlobalAPI{}, addrID: "name", }, "empty is no-op": { addrAPI: stubAddressesAPI{}, - opAPI: stubOperationRegionAPI{}, + opAPI: stubOperationGlobalAPI{}, }, "delete fails": { addrAPI: stubAddressesAPI{deleteErr: someErr}, - opAPI: stubOperationRegionAPI{}, + opAPI: stubOperationGlobalAPI{}, addrID: "name", wantErr: true, }, "wait fails": { addrAPI: stubAddressesAPI{}, - opAPI: stubOperationRegionAPI{waitErr: someErr}, + opAPI: stubOperationGlobalAPI{waitErr: someErr}, addrID: "name", wantErr: true, }, @@ -646,7 +733,7 @@ func TestDeleteIPAddr(t *testing.T) { name: "name", uid: "uid", addressesAPI: tc.addrAPI, - operationRegionAPI: tc.opAPI, + operationGlobalAPI: tc.opAPI, loadbalancerIPname: tc.addrID, } diff --git a/debugd/cmd/debugd/debugd.go b/debugd/cmd/debugd/debugd.go index 7815b269e..2f8c6a8fa 100644 --- a/debugd/cmd/debugd/debugd.go +++ b/debugd/cmd/debugd/debugd.go @@ -23,9 +23,7 @@ import ( platform "github.com/edgelesssys/constellation/internal/cloud/cloudprovider" "github.com/edgelesssys/constellation/internal/deploy/ssh" "github.com/edgelesssys/constellation/internal/deploy/user" - "github.com/edgelesssys/constellation/internal/iproute" "github.com/edgelesssys/constellation/internal/logger" - "github.com/edgelesssys/constellation/internal/role" "github.com/spf13/afero" ) @@ -69,9 +67,6 @@ func main() { log.Fatalf("%s", err) } fetcher = gcpFetcher - if err := 
setLoadbalancerRoute(ctx, fetcher); err != nil { - log.Errorf("adding load balancer IP to local routing table: %s", err) - } log.Infof("Added load balancer IP to local routing table") case platform.QEMU: fetcher = cloudprovider.NewQEMU() @@ -108,18 +103,3 @@ func writeDebugBanner(log *logger.Logger) { log.Infof("Unable to print to /dev/ttyS0: %v", err) } } - -func setLoadbalancerRoute(ctx context.Context, fetcher metadata.Fetcher) error { - ownRole, err := fetcher.Role(ctx) - if err != nil { - return err - } - if ownRole != role.ControlPlane { - return nil - } - ip, err := fetcher.DiscoverLoadbalancerIP(ctx) - if err != nil { - return err - } - return iproute.AddToLocalRoutingTable(ctx, ip) -} diff --git a/internal/cloud/gcp/api.go b/internal/cloud/gcp/api.go index b9902d622..6e2262165 100644 --- a/internal/cloud/gcp/api.go +++ b/internal/cloud/gcp/api.go @@ -29,7 +29,7 @@ type subnetworkAPI interface { } type forwardingRulesAPI interface { - List(ctx context.Context, req *computepb.ListForwardingRulesRequest, opts ...gax.CallOption) ForwardingRuleIterator + List(ctx context.Context, req *computepb.ListGlobalForwardingRulesRequest, opts ...gax.CallOption) ForwardingRuleIterator Close() error } diff --git a/internal/cloud/gcp/client.go b/internal/cloud/gcp/client.go index 9615d6112..af628f498 100644 --- a/internal/cloud/gcp/client.go +++ b/internal/cloud/gcp/client.go @@ -47,7 +47,7 @@ func NewClient(ctx context.Context) (*Client, error) { if err != nil { return nil, err } - forwardingRulesAPI, err := compute.NewForwardingRulesRESTClient(ctx) + forwardingRulesAPI, err := compute.NewGlobalForwardingRulesRESTClient(ctx) if err != nil { return nil, err } @@ -222,20 +222,14 @@ func (c *Client) RetrieveSubnetworkAliasCIDR(ctx context.Context, project, zone, } // RetrieveLoadBalancerEndpoint returns the endpoint of the load balancer with the constellation-uid tag. 
-func (c *Client) RetrieveLoadBalancerEndpoint(ctx context.Context, project, zone string) (string, error) { +func (c *Client) RetrieveLoadBalancerEndpoint(ctx context.Context, project string) (string, error) { uid, err := c.UID() if err != nil { return "", err } - region := zoneFromRegionRegex.FindString(zone) - if region == "" { - return "", fmt.Errorf("invalid zone %s", zone) - } - - req := &computepb.ListForwardingRulesRequest{ + req := &computepb.ListGlobalForwardingRulesRequest{ Project: project, - Region: region, } iter := c.forwardingRulesAPI.List(ctx, req) for { @@ -247,10 +241,11 @@ func (c *Client) RetrieveLoadBalancerEndpoint(ctx context.Context, project, zone return "", fmt.Errorf("retrieving load balancer IP failed: %w", err) } if resp.Labels["constellation-uid"] == uid { - if len(resp.Ports) == 0 { + if resp.PortRange == nil { return "", errors.New("load balancer with searched UID has no ports") } - return net.JoinHostPort(*resp.IPAddress, resp.Ports[0]), nil + portRange := strings.Split(*resp.PortRange, "-") + return net.JoinHostPort(*resp.IPAddress, portRange[0]), nil } } diff --git a/internal/cloud/gcp/client_test.go b/internal/cloud/gcp/client_test.go index 3e2c82e91..fdfb46464 100644 --- a/internal/cloud/gcp/client_test.go +++ b/internal/cloud/gcp/client_test.go @@ -796,7 +796,7 @@ func TestRetrieveLoadBalancerEndpoint(t *testing.T) { rules: []*computepb.ForwardingRule{ { IPAddress: proto.String(loadBalancerIP), - Ports: []string{"100"}, + PortRange: proto.String("100-100"), Labels: map[string]string{"constellation-uid": uid}, }, }, @@ -811,7 +811,7 @@ func TestRetrieveLoadBalancerEndpoint(t *testing.T) { rules: []*computepb.ForwardingRule{ { IPAddress: proto.String(loadBalancerIP), - Ports: []string{"100"}, + PortRange: proto.String("100-100"), }, }, }, @@ -825,7 +825,7 @@ func TestRetrieveLoadBalancerEndpoint(t *testing.T) { rules: []*computepb.ForwardingRule{ { IPAddress: proto.String(loadBalancerIP), - Ports: []string{"100"}, + PortRange: 
proto.String("100-100"), Labels: map[string]string{"constellation-uid": uid}, }, }, @@ -855,7 +855,7 @@ func TestRetrieveLoadBalancerEndpoint(t *testing.T) { rules: []*computepb.ForwardingRule{ { IPAddress: proto.String(loadBalancerIP), - Ports: []string{"100"}, + PortRange: proto.String("100-100"), Labels: map[string]string{"constellation-uid": uid}, }, }, @@ -870,7 +870,7 @@ func TestRetrieveLoadBalancerEndpoint(t *testing.T) { require := require.New(t) client := Client{forwardingRulesAPI: tc.stubForwardingRulesClient, metadataAPI: tc.stubMetadataClient} - aliasCIDR, err := client.RetrieveLoadBalancerEndpoint(context.Background(), "project", "us-central1-a") + aliasCIDR, err := client.RetrieveLoadBalancerEndpoint(context.Background(), "project") if tc.wantErr { assert.Error(err) @@ -1038,7 +1038,7 @@ type stubForwardingRulesClient struct { CloseErr error } -func (s stubForwardingRulesClient) List(ctx context.Context, req *computepb.ListForwardingRulesRequest, opts ...gax.CallOption) ForwardingRuleIterator { +func (s stubForwardingRulesClient) List(ctx context.Context, req *computepb.ListGlobalForwardingRulesRequest, opts ...gax.CallOption) ForwardingRuleIterator { return s.ForwardingRuleIterator } diff --git a/internal/cloud/gcp/metadata.go b/internal/cloud/gcp/metadata.go index bd3bebf3d..34e61ad3a 100644 --- a/internal/cloud/gcp/metadata.go +++ b/internal/cloud/gcp/metadata.go @@ -33,7 +33,7 @@ type API interface { // RetrieveSubnetworkAliasCIDR retrieves the subnetwork CIDR of the current instance. RetrieveSubnetworkAliasCIDR(ctx context.Context, project, zone, instanceName string) (string, error) // RetrieveLoadBalancerEndpoint retrieves the load balancer endpoint of the current instance. 
- RetrieveLoadBalancerEndpoint(ctx context.Context, project, zone string) (string, error) + RetrieveLoadBalancerEndpoint(ctx context.Context, project string) (string, error) // SetInstanceMetadata sets metadata key: value of the instance specified by project, zone and instanceName. SetInstanceMetadata(ctx context.Context, project, zone, instanceName, key, value string) error // UnsetInstanceMetadata removes a metadata key-value pair of the instance specified by project, zone and instanceName. @@ -123,11 +123,7 @@ func (m *Metadata) GetLoadBalancerEndpoint(ctx context.Context) (string, error) if err != nil { return "", err } - zone, err := m.api.RetrieveZone() - if err != nil { - return "", err - } - return m.api.RetrieveLoadBalancerEndpoint(ctx, project, zone) + return m.api.RetrieveLoadBalancerEndpoint(ctx, project) } // UID retrieves the UID of the constellation. diff --git a/internal/cloud/gcp/metadata_test.go b/internal/cloud/gcp/metadata_test.go index 1a7513be9..283b88d6c 100644 --- a/internal/cloud/gcp/metadata_test.go +++ b/internal/cloud/gcp/metadata_test.go @@ -293,7 +293,7 @@ func (s *stubGCPClient) RetrieveInstanceName() (string, error) { return s.instanceName, s.retrieveInstanceNameErr } -func (s *stubGCPClient) RetrieveLoadBalancerEndpoint(ctx context.Context, project, zone string) (string, error) { +func (s *stubGCPClient) RetrieveLoadBalancerEndpoint(ctx context.Context, project string) (string, error) { return s.loadBalancerIP, s.retrieveLoadBalancerErr } diff --git a/internal/cloud/gcp/wrappers.go b/internal/cloud/gcp/wrappers.go index e4b20cff5..cc51239b4 100644 --- a/internal/cloud/gcp/wrappers.go +++ b/internal/cloud/gcp/wrappers.go @@ -50,17 +50,17 @@ func (c *subnetworkClient) Get(ctx context.Context, req *computepb.GetSubnetwork } type forwardingRulesClient struct { - *compute.ForwardingRulesClient + *compute.GlobalForwardingRulesClient } func (c *forwardingRulesClient) Close() error { - return c.ForwardingRulesClient.Close() + return 
c.GlobalForwardingRulesClient.Close() } -func (c *forwardingRulesClient) List(ctx context.Context, req *computepb.ListForwardingRulesRequest, +func (c *forwardingRulesClient) List(ctx context.Context, req *computepb.ListGlobalForwardingRulesRequest, opts ...gax.CallOption, ) ForwardingRuleIterator { - return c.ForwardingRulesClient.List(ctx, req) + return c.GlobalForwardingRulesClient.List(ctx, req) } type metadataClient struct{} diff --git a/internal/constants/constants.go b/internal/constants/constants.go index 64181c412..b96ec717b 100644 --- a/internal/constants/constants.go +++ b/internal/constants/constants.go @@ -47,6 +47,7 @@ const ( SSHPort = 22 NVMEOverTCPPort = 8009 DebugdPort = 4000 + KonnectivityPort = 8132 // Default NodePort Range // https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport NodePortFrom = 30000 diff --git a/internal/constants/firewall.go b/internal/constants/firewall.go index 02522aff9..413ae58d8 100644 --- a/internal/constants/firewall.go +++ b/internal/constants/firewall.go @@ -42,6 +42,13 @@ var ( IPRange: "0.0.0.0/0", FromPort: KubernetesPort, }, + { + Name: "konnectivity", + Description: "konnectivity", + Protocol: "tcp", + IPRange: "0.0.0.0/0", + FromPort: KonnectivityPort, + }, } // IngressRulesDebug is the default set of ingress rules for a Constellation cluster with debug mode. 
diff --git a/internal/grpc/retry/retry.go b/internal/grpc/retry/retry.go index 394a6ede4..5ab9522f2 100644 --- a/internal/grpc/retry/retry.go +++ b/internal/grpc/retry/retry.go @@ -36,6 +36,11 @@ func ServiceIsUnavailable(err error) bool { return false } + // retry if GCP proxy LB isn't fully available yet + if strings.HasPrefix(statusErr.Message(), `connection error: desc = "transport: authentication handshake failed: EOF"`) { + return true + } + // ideally we would check the error type directly, but grpc only provides a string return !strings.HasPrefix(statusErr.Message(), `connection error: desc = "transport: authentication handshake failed`) } diff --git a/internal/iproute/route.go b/internal/iproute/route.go deleted file mode 100644 index a0ed43bc7..000000000 --- a/internal/iproute/route.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: AGPL-3.0-only -*/ - -package iproute - -import ( - "context" - "errors" - "fmt" - "os/exec" - "strings" -) - -// AddToLocalRoutingTable adds the IP to the local routing table. -func AddToLocalRoutingTable(ctx context.Context, ip string) error { - return manipulateLocalRoutingTable(ctx, "add", ip) -} - -// RemoveFromLocalRoutingTable removes the IPfrom the local routing table. 
-func RemoveFromLocalRoutingTable(ctx context.Context, ip string) error { - return manipulateLocalRoutingTable(ctx, "del", ip) -} - -func manipulateLocalRoutingTable(ctx context.Context, action string, ip string) error { - // https://github.com/GoogleCloudPlatform/guest-agent/blob/792fce795218633bcbde505fb3457a0b24f26d37/google_guest_agent/addresses.go#L179 - if !strings.Contains(ip, "/") { - ip = ip + "/32" - } - - args := []string{"route", action, "to", "local", ip, "scope", "host", "dev", "ens3", "proto", "66"} - _, err := exec.CommandContext(ctx, "ip", args...).Output() - if err == nil { - return nil - } - var exitErr *exec.ExitError - if !errors.As(err, &exitErr) { - return fmt.Errorf("ip route %s: %w", action, err) - } - if exitErr.ExitCode() == 2 { - // "RTNETLINK answers: File exists" or "RTNETLINK answers: No such process" - // - // Ignore, expected in case of adding an existing route or deleting a route - // that does not exist. - return nil - } - return fmt.Errorf("ip route %s (code %v) with: %s", action, exitErr.ExitCode(), exitErr.Stderr) -} diff --git a/internal/versions/versions.go b/internal/versions/versions.go index 47acd47c8..27709d441 100644 --- a/internal/versions/versions.go +++ b/internal/versions/versions.go @@ -43,6 +43,8 @@ func IsPreviewK8sVersion(version ValidK8sVersion) bool { const ( // Constellation images. // These images are built in a way that they support all versions currently listed in VersionConfigs. + KonnectivityAgentImage = "us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent:v0.0.32" + KonnectivityServerImage = "registry.k8s.io/kas-network-proxy/proxy-server:v0.0.32" JoinImage = "ghcr.io/edgelesssys/constellation/join-service:v0.0.2-0.20220905091720-bd6c6ce836af" AccessManagerImage = "ghcr.io/edgelesssys/constellation/access-manager:v0.0.1" KmsImage = "ghcr.io/edgelesssys/constellation/kmsserver:v0.0.2-0.20220831181049-47d4c9e30423"