Mirror of https://github.com/edgelesssys/constellation.git
bootstrapper: add fallback endpoint and custom endpoint to SAN field (#2108)
* terraform: collect apiserver cert SANs and support custom endpoint
* constants: add new constants for cluster configuration and custom endpoint
* cloud: support apiserver cert SANs and prepare for endpoint migration on AWS
* config: add customEndpoint field
* bootstrapper: use per-CSP apiserver cert SANs
* cli: route customEndpoint to terraform and add migration for apiserver cert SANs
* bootstrapper: change interface of GetLoadBalancerEndpoint to return host and port separately
parent 3324a4eba2
commit 8da6a23aa5
64 changed files with 724 additions and 301 deletions
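At the core of the change, ProviderMetadata.GetLoadBalancerEndpoint now returns host and port separately instead of a single "host:port" string, and callers join the parts only where a combined endpoint is actually needed. A minimal, self-contained sketch of the new contract (interface shape taken from the diff below; the fake implementation and its values are hypothetical):

package main

import (
	"context"
	"fmt"
	"net"
)

// ProviderMetadata, reduced here to the one method this commit changes.
type ProviderMetadata interface {
	GetLoadBalancerEndpoint(ctx context.Context) (host, port string, err error)
}

// fakeMetadata is a hypothetical implementation for illustration only.
type fakeMetadata struct{}

func (fakeMetadata) GetLoadBalancerEndpoint(_ context.Context) (string, string, error) {
	return "lb.example.com", "6443", nil // hypothetical values
}

func main() {
	var m ProviderMetadata = fakeMetadata{}
	host, port, err := m.GetLoadBalancerEndpoint(context.Background())
	if err != nil {
		panic(err)
	}
	// Callers that still need the combined form join the parts explicitly.
	fmt.Println(net.JoinHostPort(host, port)) // lb.example.com:6443
}

Returning the parts separately lets the host feed kubeadm's cert SANs and control-plane endpoint directly, while net.JoinHostPort rebuilds the combined form where Konnectivity needs it.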
@@ -19,12 +19,12 @@ type ProviderMetadata interface {
 	// Self retrieves the current instance.
 	Self(ctx context.Context) (metadata.InstanceMetadata, error)
 	// GetLoadBalancerEndpoint retrieves the load balancer endpoint.
-	GetLoadBalancerEndpoint(ctx context.Context) (string, error)
+	GetLoadBalancerEndpoint(ctx context.Context) (host, port string, err error)
 }

 type stubProviderMetadata struct {
-	getLoadBalancerEndpointErr  error
-	getLoadBalancerEndpointResp string
+	getLoadBalancerEndpointErr                       error
+	getLoadBalancerHostResp, getLoadBalancerPortResp string

 	selfErr  error
 	selfResp metadata.InstanceMetadata

@@ -33,8 +33,8 @@ type stubProviderMetadata struct {
 	uidResp string
 }

-func (m *stubProviderMetadata) GetLoadBalancerEndpoint(_ context.Context) (string, error) {
-	return m.getLoadBalancerEndpointResp, m.getLoadBalancerEndpointErr
+func (m *stubProviderMetadata) GetLoadBalancerEndpoint(_ context.Context) (string, string, error) {
+	return m.getLoadBalancerHostResp, m.getLoadBalancerPortResp, m.getLoadBalancerEndpointErr
 }

 func (m *stubProviderMetadata) Self(_ context.Context) (metadata.InstanceMetadata, error) {

@@ -91,7 +91,7 @@ func (k *KubernetesUtil) InstallComponents(ctx context.Context, kubernetesCompon
 // InitCluster instruments kubeadm to initialize the K8s cluster.
 // On success an admin kubeconfig file is returned.
 func (k *KubernetesUtil) InitCluster(
-	ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, controlPlaneEndpoint string, conformanceMode bool, log *logger.Logger,
+	ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, controlPlaneHost, controlPlanePort string, conformanceMode bool, log *logger.Logger,
 ) ([]byte, error) {
 	// TODO(3u13r): audit policy should be user input
 	auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()

@@ -148,6 +148,7 @@ func (k *KubernetesUtil) InitCluster(
 	}

 	log.Infof("Preparing node for Konnectivity")
+	controlPlaneEndpoint := net.JoinHostPort(controlPlaneHost, controlPlanePort)
 	if err := k.prepareControlPlaneForKonnectivity(ctx, controlPlaneEndpoint); err != nil {
 		return nil, fmt.Errorf("setup konnectivity: %w", err)
 	}

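The endpoint is rebuilt with net.JoinHostPort rather than plain string concatenation; the standard-library helper is IPv6-safe, bracketing literal IPv6 hosts as the host:port form requires. A small runnable illustration:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Plain concatenation would yield the unparseable "2001:db8::1:6443".
	fmt.Println(net.JoinHostPort("10.118.0.1", "6443"))  // 10.118.0.1:6443
	fmt.Println(net.JoinHostPort("2001:db8::1", "6443")) // [2001:db8::1]:6443
}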
@@ -241,11 +242,12 @@ func (k *KubernetesUtil) prepareControlPlaneForKonnectivity(ctx context.Context,

 // SetupPodNetworkInput holds all configuration options to setup the pod network.
 type SetupPodNetworkInput struct {
-	CloudProvider        string
-	NodeName             string
-	FirstNodePodCIDR     string
-	SubnetworkPodCIDR    string
-	LoadBalancerEndpoint string
+	CloudProvider     string
+	NodeName          string
+	FirstNodePodCIDR  string
+	SubnetworkPodCIDR string
+	LoadBalancerHost  string
+	LoadBalancerPort  string
 }

 // WaitForCilium waits until Cilium reports a healthy status over its /healthz endpoint.

@@ -314,7 +316,7 @@ func (k *KubernetesUtil) FixCilium(ctx context.Context) error {
 }

 // JoinCluster joins existing Kubernetes cluster using kubeadm join.
-func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, log *logger.Logger) error {
+func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneHost, controlPlanePort string, log *logger.Logger) error {
 	// TODO(3u13r): audit policy should be user input
 	auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()
 	if err != nil {

@@ -341,6 +343,7 @@ func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, pee

 	if peerRole == role.ControlPlane {
 		log.Infof("Prep Init Kubernetes cluster")
+		controlPlaneEndpoint := net.JoinHostPort(controlPlaneHost, controlPlanePort)
 		if err := k.prepareControlPlaneForKonnectivity(ctx, controlPlaneEndpoint); err != nil {
 			return fmt.Errorf("setup konnectivity: %w", err)
 		}

@@ -19,8 +19,8 @@ import (

 type clusterUtil interface {
 	InstallComponents(ctx context.Context, kubernetesComponents components.Components) error
-	InitCluster(ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, controlPlaneEndpoint string, conformanceMode bool, log *logger.Logger) ([]byte, error)
-	JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, log *logger.Logger) error
+	InitCluster(ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, controlPlaneHost, controlPlanePort string, conformanceMode bool, log *logger.Logger) ([]byte, error)
+	JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneHost, controlPlanePort string, log *logger.Logger) error
 	WaitForCilium(ctx context.Context, log *logger.Logger) error
 	FixCilium(ctx context.Context) error
 	StartKubelet() error

@@ -79,7 +79,7 @@ func New(cloudProvider string, clusterUtil clusterUtil, configProvider configura
 // InitCluster initializes a new Kubernetes cluster and applies pod network provider.
 func (k *KubeWrapper) InitCluster(
 	ctx context.Context, cloudServiceAccountURI, versionString, clusterName string, measurementSalt []byte,
-	helmReleasesRaw []byte, conformanceMode bool, kubernetesComponents components.Components, log *logger.Logger,
+	helmReleasesRaw []byte, conformanceMode bool, kubernetesComponents components.Components, apiServerCertSANs []string, log *logger.Logger,
 ) ([]byte, error) {
 	log.With(zap.String("version", versionString)).Infof("Installing Kubernetes components")
 	if err := k.clusterUtil.InstallComponents(ctx, kubernetesComponents); err != nil {

@@ -110,16 +110,24 @@ func (k *KubeWrapper) InitCluster(
 	}

 	// this is the endpoint in "kubeadm init --control-plane-endpoint=<IP/DNS>:<port>"
-	controlPlaneEndpoint, err := k.providerMetadata.GetLoadBalancerEndpoint(ctx)
+	// TODO(malt3): switch over to DNS name on AWS and Azure
+	// soon as every apiserver certificate of every control-plane node
+	// has the dns endpoint in its SAN list.
+	controlPlaneHost, controlPlanePort, err := k.providerMetadata.GetLoadBalancerEndpoint(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("retrieving load balancer endpoint: %w", err)
 	}

+	certSANs := []string{nodeIP}
+	certSANs = append(certSANs, apiServerCertSANs...)
+
 	log.With(
 		zap.String("nodeName", nodeName),
 		zap.String("providerID", instance.ProviderID),
 		zap.String("nodeIP", nodeIP),
-		zap.String("controlPlaneEndpoint", controlPlaneEndpoint),
+		zap.String("controlPlaneHost", controlPlaneHost),
+		zap.String("controlPlanePort", controlPlanePort),
+		zap.String("certSANs", strings.Join(certSANs, ",")),
 		zap.String("podCIDR", subnetworkPodCIDR),
 	).Infof("Setting information for node")

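With this hunk, the SAN list handed to kubeadm is the node IP plus whatever per-CSP and custom entries arrive through apiServerCertSANs. A self-contained sketch of the merge, with hypothetical values standing in for the real inputs:

package main

import (
	"fmt"
	"strings"
)

func main() {
	nodeIP := "10.118.0.2" // hypothetical node IP
	// Hypothetical per-CSP SANs plus a user-supplied custom endpoint.
	apiServerCertSANs := []string{"lb.example.com", "custom.endpoint.example"}

	certSANs := []string{nodeIP}
	certSANs = append(certSANs, apiServerCertSANs...)
	fmt.Println(strings.Join(certSANs, ","))
	// Output: 10.118.0.2,lb.example.com,custom.endpoint.example
}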
@@ -130,16 +138,16 @@ func (k *KubeWrapper) InitCluster(
 	initConfig := k.configProvider.InitConfiguration(ccmSupported, versionString)
 	initConfig.SetNodeIP(nodeIP)
 	initConfig.SetClusterName(clusterName)
-	initConfig.SetCertSANs([]string{nodeIP})
+	initConfig.SetCertSANs(certSANs)
 	initConfig.SetNodeName(nodeName)
 	initConfig.SetProviderID(instance.ProviderID)
-	initConfig.SetControlPlaneEndpoint(controlPlaneEndpoint)
+	initConfig.SetControlPlaneEndpoint(controlPlaneHost)
 	initConfigYAML, err := initConfig.Marshal()
 	if err != nil {
 		return nil, fmt.Errorf("encoding kubeadm init configuration as YAML: %w", err)
 	}
 	log.Infof("Initializing Kubernetes cluster")
-	kubeConfig, err := k.clusterUtil.InitCluster(ctx, initConfigYAML, nodeName, clusterName, validIPs, controlPlaneEndpoint, conformanceMode, log)
+	kubeConfig, err := k.clusterUtil.InitCluster(ctx, initConfigYAML, nodeName, clusterName, validIPs, controlPlaneHost, controlPlanePort, conformanceMode, log)
 	if err != nil {
 		return nil, fmt.Errorf("kubeadm init: %w", err)
 	}

@@ -171,11 +179,12 @@ func (k *KubeWrapper) InitCluster(
 	// Step 3: configure & start kubernetes controllers
 	log.Infof("Starting Kubernetes controllers and deployments")
 	setupPodNetworkInput := k8sapi.SetupPodNetworkInput{
-		CloudProvider:        k.cloudProvider,
-		NodeName:             nodeName,
-		FirstNodePodCIDR:     nodePodCIDR,
-		SubnetworkPodCIDR:    subnetworkPodCIDR,
-		LoadBalancerEndpoint: controlPlaneEndpoint,
+		CloudProvider:     k.cloudProvider,
+		NodeName:          nodeName,
+		FirstNodePodCIDR:  nodePodCIDR,
+		SubnetworkPodCIDR: subnetworkPodCIDR,
+		LoadBalancerHost:  controlPlaneHost,
+		LoadBalancerPort:  controlPlanePort,
 	}

 	var helmReleases helm.Releases

@@ -206,20 +215,11 @@ func (k *KubeWrapper) InitCluster(
 		// Continue and don't throw an error here - things might be okay.
 	}

-	var controlPlaneIP string
-	if strings.Contains(controlPlaneEndpoint, ":") {
-		controlPlaneIP, _, err = net.SplitHostPort(controlPlaneEndpoint)
-		if err != nil {
-			return nil, fmt.Errorf("parsing control plane endpoint: %w", err)
-		}
-	} else {
-		controlPlaneIP = controlPlaneEndpoint
-	}
 	serviceConfig := constellationServicesConfig{
 		measurementSalt:        measurementSalt,
 		subnetworkPodCIDR:      subnetworkPodCIDR,
 		cloudServiceAccountURI: cloudServiceAccountURI,
-		loadBalancerIP:         controlPlaneIP,
+		loadBalancerIP:         controlPlaneHost,
 	}
 	extraVals, err := k.setupExtraVals(ctx, serviceConfig)
 	if err != nil {

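The deleted block inferred the presence of a port by searching the endpoint for a colon, a heuristic that a bare IPv6 host would defeat; since GetLoadBalancerEndpoint now returns the host pre-split, the guesswork disappears entirely. A small demonstration of the pitfall, assuming a hypothetical IPv6 load balancer address:

package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	endpoint := "2001:db8::1" // hypothetical bare IPv6 host, no port attached

	// The removed heuristic sees a colon and assumes host:port...
	fmt.Println(strings.Contains(endpoint, ":")) // true
	// ...but SplitHostPort cannot parse a bare IPv6 literal.
	_, _, err := net.SplitHostPort(endpoint)
	fmt.Println(err != nil) // true
}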
@@ -300,7 +300,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo
 		return fmt.Errorf("generating node name: %w", err)
 	}

-	loadbalancerEndpoint, err := k.providerMetadata.GetLoadBalancerEndpoint(ctx)
+	loadBalancerHost, loadBalancerPort, err := k.providerMetadata.GetLoadBalancerEndpoint(ctx)
 	if err != nil {
 		return fmt.Errorf("retrieving own instance metadata: %w", err)
 	}

@@ -309,6 +309,8 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo
 		zap.String("nodeName", nodeName),
 		zap.String("providerID", providerID),
 		zap.String("nodeIP", nodeInternalIP),
+		zap.String("loadBalancerHost", loadBalancerHost),
+		zap.String("loadBalancerPort", loadBalancerPort),
 	).Infof("Setting information for node")

 	// Step 2: configure kubeadm join config

@@ -329,7 +331,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo
 		return fmt.Errorf("encoding kubeadm join configuration as YAML: %w", err)
 	}
 	log.With(zap.String("apiServerEndpoint", args.APIServerEndpoint)).Infof("Joining Kubernetes cluster")
-	if err := k.clusterUtil.JoinCluster(ctx, joinConfigYAML, peerRole, loadbalancerEndpoint, log); err != nil {
+	if err := k.clusterUtil.JoinCluster(ctx, joinConfigYAML, peerRole, loadBalancerHost, loadBalancerPort, log); err != nil {
 		return fmt.Errorf("joining cluster: %v; %w ", string(joinConfigYAML), err)
 	}

@@ -63,7 +63,8 @@ func TestInitCluster(t *testing.T) {
 					VPCIP:         privateIP,
 					AliasIPRanges: []string{aliasIPRange},
 				},
-				getLoadBalancerEndpointResp: loadbalancerIP,
+				getLoadBalancerHostResp: loadbalancerIP,
+				getLoadBalancerPortResp: strconv.Itoa(constants.KubernetesPort),
 			},
 			wantConfig: k8sapi.KubeadmInitYAML{
 				InitConfiguration: kubeadm.InitConfiguration{

@@ -96,7 +97,8 @@ func TestInitCluster(t *testing.T) {
 					VPCIP:         privateIP,
 					AliasIPRanges: []string{aliasIPRange},
 				},
-				getLoadBalancerEndpointResp: loadbalancerIP,
+				getLoadBalancerHostResp: loadbalancerIP,
+				getLoadBalancerPortResp: strconv.Itoa(constants.KubernetesPort),
 			},
 			kubectl: stubKubectl{annotateNodeErr: assert.AnError},
 			wantErr: true,

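In the tests, the stub's single endpoint response gives way to separate host and port fields. A reduced, self-contained copy of the stub from this diff, exercised directly (the address is hypothetical; the constant stands in for constants.KubernetesPort):

package main

import (
	"context"
	"fmt"
	"strconv"
)

// Reduced copy of the test stub from this diff.
type stubProviderMetadata struct {
	getLoadBalancerHostResp, getLoadBalancerPortResp string
	getLoadBalancerEndpointErr                       error
}

func (m *stubProviderMetadata) GetLoadBalancerEndpoint(_ context.Context) (string, string, error) {
	return m.getLoadBalancerHostResp, m.getLoadBalancerPortResp, m.getLoadBalancerEndpointErr
}

func main() {
	const kubernetesPort = 6443 // stand-in for constants.KubernetesPort
	md := &stubProviderMetadata{
		getLoadBalancerHostResp: "10.118.0.1", // hypothetical
		getLoadBalancerPortResp: strconv.Itoa(kubernetesPort),
	}
	host, port, err := md.GetLoadBalancerEndpoint(context.Background())
	fmt.Println(host, port, err) // 10.118.0.1 6443 <nil>
}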
@@ -191,7 +193,7 @@ func TestInitCluster(t *testing.T) {

 			_, err := kube.InitCluster(
 				context.Background(), serviceAccountURI, string(tc.k8sVersion), "kubernetes",
-				nil, []byte("{}"), false, nil, logger.NewTest(t),
+				nil, []byte("{}"), false, nil, nil, logger.NewTest(t),
 			)

 			if tc.wantErr {

@@ -449,7 +451,7 @@ func (s *stubClusterUtil) InstallComponents(_ context.Context, _ components.Comp
 	return s.installComponentsErr
 }

-func (s *stubClusterUtil) InitCluster(_ context.Context, initConfig []byte, _, _ string, _ []net.IP, _ string, _ bool, _ *logger.Logger) ([]byte, error) {
+func (s *stubClusterUtil) InitCluster(_ context.Context, initConfig []byte, _, _ string, _ []net.IP, _, _ string, _ bool, _ *logger.Logger) ([]byte, error) {
 	s.initConfigs = append(s.initConfigs, initConfig)
 	return s.kubeconfig, s.initClusterErr
 }

@@ -474,7 +476,7 @@ func (s *stubClusterUtil) SetupNodeOperator(_ context.Context, _ k8sapi.Client,
 	return s.setupNodeOperatorErr
 }

-func (s *stubClusterUtil) JoinCluster(_ context.Context, joinConfig []byte, _ role.Role, _ string, _ *logger.Logger) error {
+func (s *stubClusterUtil) JoinCluster(_ context.Context, joinConfig []byte, _ role.Role, _, _ string, _ *logger.Logger) error {
 	s.joinConfigs = append(s.joinConfigs, joinConfig)
 	return s.joinClusterErr
 }