Mirror of https://github.com/edgelesssys/constellation.git
AB#2525 clean up unused code (#504)
* Rename Metadata->Cloud
* Remove unused methods, functions, and variables
* More privacy for testing stubs

Signed-off-by: Daniel Weiße <dw@edgeless.systems>
Parent: 7d16c02e0d
Commit: 5efe05d933
22 changed files with 115 additions and 484 deletions
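The "more privacy for testing stubs" bullet refers to turning exported fields on hand-written test stubs into unexported ones, so the canned responses can only be configured from inside the stub's own package. A minimal sketch of the resulting pattern, with the package name assumed and the field set condensed for illustration (the real change is in the stubProviderMetadata hunk below):

```go
package kubernetes // assumed package name for illustration

import "context"

// stubProviderMetadata is a hand-written test stub. With unexported
// fields, only tests inside this package can set its canned responses;
// other packages can no longer poke at its internals.
type stubProviderMetadata struct {
	uidResp string
	uidErr  error
}

// UID returns the preconfigured constellation UID or error.
func (m *stubProviderMetadata) UID(_ context.Context) (string, error) {
	return m.uidResp, m.uidErr
}
```

The same exported-to-unexported rename is applied to the other stubs touched in this commit (stubKubeconfigReader, stubConfigProvider, stubKubectl, stubClientConfig, stubMetadata).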
@@ -184,7 +184,7 @@ func main() {
 issuer = initserver.NewIssuerWrapper(qemu.NewIssuer(), vmtype.Unknown, nil)

 cloudLogger = qemucloud.NewLogger()
-metadata := &qemucloud.Metadata{}
+metadata := qemucloud.New()
 pcrsJSON, err := json.Marshal(pcrs)
 if err != nil {
 log.With(zap.Error(err)).Fatalf("Failed to marshal PCRs")
@@ -16,49 +16,31 @@ import (
 type ProviderMetadata interface {
 // UID returns the unique identifier for the constellation.
 UID(ctx context.Context) (string, error)
-// List retrieves all instances belonging to the current Constellation.
-List(ctx context.Context) ([]metadata.InstanceMetadata, error)
 // Self retrieves the current instance.
 Self(ctx context.Context) (metadata.InstanceMetadata, error)
 // GetLoadBalancerEndpoint retrieves the load balancer endpoint.
 GetLoadBalancerEndpoint(ctx context.Context) (string, error)
-// GetInstance retrieves an instance using its providerID.
-GetInstance(ctx context.Context, providerID string) (metadata.InstanceMetadata, error)
 }

 type stubProviderMetadata struct {
-GetLoadBalancerEndpointErr error
-GetLoadBalancerEndpointResp string
+getLoadBalancerEndpointErr error
+getLoadBalancerEndpointResp string

-ListErr error
-ListResp []metadata.InstanceMetadata
+selfErr error
+selfResp metadata.InstanceMetadata

-SelfErr error
-SelfResp metadata.InstanceMetadata
+uidErr error
+uidResp string
-
-GetInstanceErr error
-GetInstanceResp metadata.InstanceMetadata
-
-UIDErr error
-UIDResp string
 }

 func (m *stubProviderMetadata) GetLoadBalancerEndpoint(ctx context.Context) (string, error) {
-return m.GetLoadBalancerEndpointResp, m.GetLoadBalancerEndpointErr
+return m.getLoadBalancerEndpointResp, m.getLoadBalancerEndpointErr
-}
-
-func (m *stubProviderMetadata) List(ctx context.Context) ([]metadata.InstanceMetadata, error) {
-return m.ListResp, m.ListErr
 }

 func (m *stubProviderMetadata) Self(ctx context.Context) (metadata.InstanceMetadata, error) {
-return m.SelfResp, m.SelfErr
+return m.selfResp, m.selfErr
-}
-
-func (m *stubProviderMetadata) GetInstance(ctx context.Context, providerID string) (metadata.InstanceMetadata, error) {
-return m.GetInstanceResp, m.GetInstanceErr
 }

 func (m *stubProviderMetadata) UID(ctx context.Context) (string, error) {
-return m.UIDResp, m.UIDErr
+return m.uidResp, m.uidErr
 }
@@ -1,58 +0,0 @@
-/*
-Copyright (c) Edgeless Systems GmbH
-
-SPDX-License-Identifier: AGPL-3.0-only
-*/
-
-package k8sapi
-
-import (
-"flag"
-"fmt"
-
-"github.com/google/shlex"
-kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
-)
-
-// ParseJoinCommand parses API server endpoint, token and CA cert hash from
-// provided kubeadm join shell command, and returns it as a BootstrapTokenDiscovery.
-//
-// Expected format:
-// kubeadm join [API_SERVER_ENDPOINT] --token [TOKEN] --discovery-token-ca-cert-hash [DISCOVERY_TOKEN_CA_CERT_HASH] --control-plane
-// .
-func ParseJoinCommand(joinCommand string) (*kubeadm.BootstrapTokenDiscovery, error) {
-// split and verify that this is a kubeadm join command
-argv, err := shlex.Split(joinCommand)
-if err != nil {
-return nil, fmt.Errorf("kubadm join command could not be tokenized: %v", joinCommand)
-}
-if len(argv) < 3 {
-return nil, fmt.Errorf("kubadm join command is too short: %v", argv)
-}
-if argv[0] != "kubeadm" || argv[1] != "join" {
-return nil, fmt.Errorf("not a kubeadm join command: %v", argv)
-}
-
-result := kubeadm.BootstrapTokenDiscovery{APIServerEndpoint: argv[2]}
-
-var caCertHash string
-// parse flags
-flags := flag.NewFlagSet("", flag.ContinueOnError)
-flags.StringVar(&result.Token, "token", "", "")
-flags.StringVar(&caCertHash, "discovery-token-ca-cert-hash", "", "")
-flags.Bool("control-plane", false, "")
-flags.String("certificate-key", "", "")
-if err := flags.Parse(argv[3:]); err != nil {
-return nil, fmt.Errorf("parsing flag arguments: %v %w", argv, err)
-}
-
-if result.Token == "" {
-return nil, fmt.Errorf("missing flag argument token: %v", argv)
-}
-if caCertHash == "" {
-return nil, fmt.Errorf("missing flag argument discovery-token-ca-cert-hash: %v", argv)
-}
-result.CACertHashes = []string{caCertHash}
-
-return &result, nil
-}
@@ -1,75 +0,0 @@
-/*
-Copyright (c) Edgeless Systems GmbH
-
-SPDX-License-Identifier: AGPL-3.0-only
-*/
-
-package k8sapi
-
-import (
-"testing"
-
-"github.com/stretchr/testify/assert"
-"github.com/stretchr/testify/require"
-kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
-)
-
-func TestParseJoinCommand(t *testing.T) {
-testCases := map[string]struct {
-joinCommand string
-wantJoinArgs kubeadm.BootstrapTokenDiscovery
-wantErr bool
-}{
-"join command can be parsed": {
-joinCommand: "kubeadm join 192.0.2.0:8443 --token dummy-token --discovery-token-ca-cert-hash sha512:dummy-hash --control-plane",
-wantJoinArgs: kubeadm.BootstrapTokenDiscovery{
-APIServerEndpoint: "192.0.2.0:8443",
-Token: "dummy-token",
-CACertHashes: []string{"sha512:dummy-hash"},
-},
-wantErr: false,
-},
-"incorrect join command returns error": {
-joinCommand: "some string",
-wantErr: true,
-},
-"missing api server endpoint is checked": {
-joinCommand: "kubeadm join --token dummy-token --discovery-token-ca-cert-hash sha512:dummy-hash --control-plane",
-wantErr: true,
-},
-"missing token is checked": {
-joinCommand: "kubeadm join 192.0.2.0:8443 --discovery-token-ca-cert-hash sha512:dummy-hash --control-plane",
-wantErr: true,
-},
-"missing discovery-token-ca-cert-hash is checked": {
-joinCommand: "kubeadm join 192.0.2.0:8443 --token dummy-token --control-plane",
-wantErr: true,
-},
-"missing control-plane": {
-joinCommand: "kubeadm join 192.0.2.0:8443 --token dummy-token --discovery-token-ca-cert-hash sha512:dummy-hash",
-wantJoinArgs: kubeadm.BootstrapTokenDiscovery{
-APIServerEndpoint: "192.0.2.0:8443",
-Token: "dummy-token",
-CACertHashes: []string{"sha512:dummy-hash"},
-},
-wantErr: false,
-},
-}
-
-for name, tc := range testCases {
-t.Run(name, func(t *testing.T) {
-assert := assert.New(t)
-require := require.New(t)
-
-joinArgs, err := ParseJoinCommand(tc.joinCommand)
-
-if tc.wantErr {
-assert.Error(err)
-return
-}
-require.NoError(err)
-
-assert.Equal(&tc.wantJoinArgs, joinArgs)
-})
-}
-}
@@ -252,7 +252,6 @@ type SetupPodNetworkInput struct {
 NodeName string
 FirstNodePodCIDR string
 SubnetworkPodCIDR string
-ProviderID string
 LoadBalancerEndpoint string
 }

@@ -317,14 +316,6 @@ func (k *KubernetesUtil) FixCilium(log *logger.Logger) {
 }
 }

-// SetupAutoscaling deploys the k8s cluster autoscaler.
-func (k *KubernetesUtil) SetupAutoscaling(kubectl Client, clusterAutoscalerConfiguration kubernetes.Marshaler, secrets kubernetes.Marshaler) error {
-if err := kubectl.Apply(secrets, true); err != nil {
-return fmt.Errorf("applying cluster-autoscaler Secrets: %w", err)
-}
-return kubectl.Apply(clusterAutoscalerConfiguration, true)
-}
-
 // SetupGCPGuestAgent deploys the GCP guest agent daemon set.
 func (k *KubernetesUtil) SetupGCPGuestAgent(kubectl Client, guestAgentDaemonset kubernetes.Marshaler) error {
 return kubectl.Apply(guestAgentDaemonset, true)

@@ -260,12 +260,6 @@ func (k *KubeadmJoinYAML) Marshal() ([]byte, error) {
 return kubernetes.MarshalK8SResources(k)
 }

-// Unmarshal from a k8s resource YAML.
-func (k *KubeadmJoinYAML) Unmarshal(yamlData []byte) (KubeadmJoinYAML, error) {
-var tmp KubeadmJoinYAML
-return tmp, kubernetes.UnmarshalK8SResources(yamlData, &tmp)
-}
-
 // KubeadmInitYAML holds configuration for kubeadm init workflow.
 type KubeadmInitYAML struct {
 InitConfiguration kubeadm.InitConfiguration

@@ -288,11 +282,6 @@ func (k *KubeadmInitYAML) SetCertSANs(certSANs []string) {
 }
 }

-// SetAPIServerAdvertiseAddress sets the advertised API server address.
-func (k *KubeadmInitYAML) SetAPIServerAdvertiseAddress(apiServerAdvertiseAddress string) {
-k.InitConfiguration.LocalAPIEndpoint.AdvertiseAddress = apiServerAdvertiseAddress
-}
-
 // SetControlPlaneEndpoint sets the control plane endpoint if controlPlaneEndpoint is not empty.
 func (k *KubeadmInitYAML) SetControlPlaneEndpoint(controlPlaneEndpoint string) {
 if controlPlaneEndpoint != "" {

@@ -300,21 +289,6 @@ func (k *KubeadmInitYAML) SetControlPlaneEndpoint(controlPlaneEndpoint string) {
 }
 }

-// SetServiceCIDR sets the CIDR of service subnet.
-func (k *KubeadmInitYAML) SetServiceCIDR(serviceCIDR string) {
-k.ClusterConfiguration.Networking.ServiceSubnet = serviceCIDR
-}
-
-// SetPodNetworkCIDR sets the CIDR of pod subnet.
-func (k *KubeadmInitYAML) SetPodNetworkCIDR(podNetworkCIDR string) {
-k.ClusterConfiguration.Networking.PodSubnet = podNetworkCIDR
-}
-
-// SetServiceDNSDomain sets the dns domain.
-func (k *KubeadmInitYAML) SetServiceDNSDomain(serviceDNSDomain string) {
-k.ClusterConfiguration.Networking.DNSDomain = serviceDNSDomain
-}
-
 // SetNodeIP sets the node IP.
 func (k *KubeadmInitYAML) SetNodeIP(nodeIP string) {
 if k.InitConfiguration.NodeRegistration.KubeletExtraArgs == nil {

@@ -337,9 +311,3 @@ func (k *KubeadmInitYAML) SetProviderID(providerID string) {
 func (k *KubeadmInitYAML) Marshal() ([]byte, error) {
 return kubernetes.MarshalK8SResources(k)
 }
-
-// Unmarshal from a k8s resource YAML.
-func (k *KubeadmInitYAML) Unmarshal(yamlData []byte) (KubeadmInitYAML, error) {
-var tmp KubeadmInitYAML
-return tmp, kubernetes.UnmarshalK8SResources(yamlData, &tmp)
-}
@@ -9,6 +9,7 @@ package k8sapi
 import (
 "testing"

+"github.com/edgelesssys/constellation/v2/internal/kubernetes"
 "github.com/edgelesssys/constellation/v2/internal/versions"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"

@@ -32,11 +33,8 @@ func TestInitConfiguration(t *testing.T) {
 "kubeadm init config with all fields can be created": {
 config: func() KubeadmInitYAML {
 c := kubeadmConfig.InitConfiguration(true, versions.Default)
-c.SetAPIServerAdvertiseAddress("192.0.2.0")
 c.SetNodeIP("192.0.2.0")
 c.SetNodeName("node")
-c.SetPodNetworkCIDR("10.244.0.0/16")
-c.SetServiceCIDR("10.245.0.0/24")
 c.SetProviderID("somecloudprovider://instance-id")
 return c
 }(),

@@ -49,8 +47,8 @@ func TestInitConfiguration(t *testing.T) {

 config, err := tc.config.Marshal()
 require.NoError(err)
-tmp, err := tc.config.Unmarshal(config)
-require.NoError(err)
+var tmp KubeadmInitYAML
+require.NoError(kubernetes.UnmarshalK8SResources(config, &tmp))
 // test on correct mashalling and unmarshalling
 assert.Equal(tc.config.ClusterConfiguration, tmp.ClusterConfiguration)
 assert.Equal(tc.config.InitConfiguration, tmp.InitConfiguration)

@@ -120,8 +118,8 @@ func TestJoinConfiguration(t *testing.T) {

 config, err := tc.config.Marshal()
 require.NoError(err)
-tmp, err := tc.config.Unmarshal(config)
-require.NoError(err)
+var tmp KubeadmJoinYAML
+require.NoError(kubernetes.UnmarshalK8SResources(config, &tmp))
 // test on correct mashalling and unmarshalling
 assert.Equal(tc.config.JoinConfiguration, tmp.JoinConfiguration)
 })
@@ -39,30 +39,30 @@ users:
 `

 type stubClientConfig struct {
-RawConfigConfig clientcmdapi.Config
-RawConfigErr error
-ClientConfigConfig *restclient.Config
-ClientConfigErr error
-NamespaceString string
-NamespaceOverridden bool
-NamespaceErr error
-ConfigAccessResult clientcmd.ConfigAccess
+rawConfigConfig clientcmdapi.Config
+rawConfigErr error
+clientConfigConfig *restclient.Config
+clientConfigErr error
+namespaceString string
+namespaceOverridden bool
+namespaceErr error
+configAccessResult clientcmd.ConfigAccess
 }

 func (s *stubClientConfig) RawConfig() (clientcmdapi.Config, error) {
-return s.RawConfigConfig, s.RawConfigErr
+return s.rawConfigConfig, s.rawConfigErr
 }

 func (s *stubClientConfig) ClientConfig() (*restclient.Config, error) {
-return s.ClientConfigConfig, s.ClientConfigErr
+return s.clientConfigConfig, s.clientConfigErr
 }

 func (s *stubClientConfig) Namespace() (string, bool, error) {
-return s.NamespaceString, s.NamespaceOverridden, s.NamespaceErr
+return s.namespaceString, s.namespaceOverridden, s.namespaceErr
 }

 func (s *stubClientConfig) ConfigAccess() clientcmd.ConfigAccess {
-return s.ConfigAccessResult
+return s.configAccessResult
 }

 func TestNewRESTClientGetter(t *testing.T) {

@@ -76,7 +76,7 @@ func TestToRESTConfig(t *testing.T) {
 require := require.New(t)
 getter := restClientGetter{
 clientconfig: &stubClientConfig{
-ClientConfigConfig: &restclient.Config{},
+clientConfigConfig: &restclient.Config{},
 },
 }
 result, err := getter.ToRESTConfig()

@@ -88,7 +88,7 @@ func TestToDiscoveryClient(t *testing.T) {
 require := require.New(t)
 getter := restClientGetter{
 clientconfig: &stubClientConfig{
-ClientConfigConfig: &restclient.Config{},
+clientConfigConfig: &restclient.Config{},
 },
 }
 result, err := getter.ToDiscoveryClient()

@@ -100,7 +100,7 @@ func TestToDiscoveryClientFail(t *testing.T) {
 require := require.New(t)
 getter := restClientGetter{
 clientconfig: &stubClientConfig{
-ClientConfigErr: errors.New("someErr"),
+clientConfigErr: errors.New("someErr"),
 },
 }
 _, err := getter.ToDiscoveryClient()

@@ -111,7 +111,7 @@ func TestToRESTMapper(t *testing.T) {
 require := require.New(t)
 getter := restClientGetter{
 clientconfig: &stubClientConfig{
-ClientConfigConfig: &restclient.Config{},
+clientConfigConfig: &restclient.Config{},
 },
 }
 result, err := getter.ToRESTMapper()

@@ -123,7 +123,7 @@ func TestToRESTMapperFail(t *testing.T) {
 require := require.New(t)
 getter := restClientGetter{
 clientconfig: &stubClientConfig{
-ClientConfigErr: errors.New("someErr"),
+clientConfigErr: errors.New("someErr"),
 },
 }
 _, err := getter.ToRESTMapper()

@@ -132,7 +132,7 @@ func TestToRESTMapperFail(t *testing.T) {

 func TestToRawKubeConfigLoader(t *testing.T) {
 clientConfig := stubClientConfig{
-ClientConfigConfig: &restclient.Config{},
+clientConfigConfig: &restclient.Config{},
 }
 require := require.New(t)
 getter := restClientGetter{
@@ -22,14 +22,6 @@ const (
 nodeOperatorCatalogNamespace = "olm"
 )

-// NodeOperatorCRDNames are the names of the custom resource definitions that are used by the node operator.
-var NodeOperatorCRDNames = []string{
-"autoscalingstrategies.update.edgeless.systems",
-"nodeimages.update.edgeless.systems",
-"pendingnodes.update.edgeless.systems",
-"scalinggroups.update.edgeless.systems",
-}
-
 // NodeOperatorDeployment groups all deployments for node operator.
 type NodeOperatorDeployment struct {
 CatalogSource operatorsv1alpha1.CatalogSource
@@ -58,17 +58,17 @@ func TestInitCluster(t *testing.T) {
 "kubeadm init works with metadata and loadbalancer": {
 clusterUtil: stubClusterUtil{},
 kubeconfigReader: &stubKubeconfigReader{
-Kubeconfig: []byte("someKubeconfig"),
+kubeconfig: []byte("someKubeconfig"),
 },
 kubeAPIWaiter: stubKubeAPIWaiter{},
 providerMetadata: &stubProviderMetadata{
-SelfResp: metadata.InstanceMetadata{
+selfResp: metadata.InstanceMetadata{
 Name: nodeName,
 ProviderID: providerID,
 VPCIP: privateIP,
 AliasIPRanges: []string{aliasIPRange},
 },
-GetLoadBalancerEndpointResp: loadbalancerIP,
+getLoadBalancerEndpointResp: loadbalancerIP,
 },
 wantConfig: k8sapi.KubeadmInitYAML{
 InitConfiguration: kubeadm.InitConfiguration{

@@ -93,11 +93,11 @@ func TestInitCluster(t *testing.T) {
 "kubeadm init fails when retrieving metadata self": {
 clusterUtil: stubClusterUtil{},
 kubeconfigReader: &stubKubeconfigReader{
-Kubeconfig: []byte("someKubeconfig"),
+kubeconfig: []byte("someKubeconfig"),
 },
 kubeAPIWaiter: stubKubeAPIWaiter{},
 providerMetadata: &stubProviderMetadata{
-SelfErr: someErr,
+selfErr: someErr,
 },
 wantErr: true,
 k8sVersion: versions.Default,

@@ -105,10 +105,10 @@ func TestInitCluster(t *testing.T) {
 "kubeadm init fails when retrieving metadata loadbalancer ip": {
 clusterUtil: stubClusterUtil{},
 kubeconfigReader: &stubKubeconfigReader{
-Kubeconfig: []byte("someKubeconfig"),
+kubeconfig: []byte("someKubeconfig"),
 },
 providerMetadata: &stubProviderMetadata{
-GetLoadBalancerEndpointErr: someErr,
+getLoadBalancerEndpointErr: someErr,
 },
 wantErr: true,
 k8sVersion: versions.Default,

@@ -116,7 +116,7 @@ func TestInitCluster(t *testing.T) {
 "kubeadm init fails when applying the init config": {
 clusterUtil: stubClusterUtil{initClusterErr: someErr},
 kubeconfigReader: &stubKubeconfigReader{
-Kubeconfig: []byte("someKubeconfig"),
+kubeconfig: []byte("someKubeconfig"),
 },
 kubeAPIWaiter: stubKubeAPIWaiter{},
 providerMetadata: &stubProviderMetadata{},

@@ -127,7 +127,7 @@ func TestInitCluster(t *testing.T) {
 clusterUtil: stubClusterUtil{},
 helmClient: stubHelmClient{ciliumError: someErr},
 kubeconfigReader: &stubKubeconfigReader{
-Kubeconfig: []byte("someKubeconfig"),
+kubeconfig: []byte("someKubeconfig"),
 },
 providerMetadata: &stubProviderMetadata{},
 wantErr: true,

@@ -137,7 +137,7 @@ func TestInitCluster(t *testing.T) {
 clusterUtil: stubClusterUtil{},
 helmClient: stubHelmClient{servicesError: someErr},
 kubeconfigReader: &stubKubeconfigReader{
-Kubeconfig: []byte("someKubeconfig"),
+kubeconfig: []byte("someKubeconfig"),
 },
 kubeAPIWaiter: stubKubeAPIWaiter{},
 providerMetadata: &stubProviderMetadata{},

@@ -148,7 +148,7 @@ func TestInitCluster(t *testing.T) {
 clusterUtil: stubClusterUtil{},
 helmClient: stubHelmClient{servicesError: someErr},
 kubeconfigReader: &stubKubeconfigReader{
-Kubeconfig: []byte("someKubeconfig"),
+kubeconfig: []byte("someKubeconfig"),
 },
 kubeAPIWaiter: stubKubeAPIWaiter{},
 providerMetadata: &stubProviderMetadata{},

@@ -159,7 +159,7 @@ func TestInitCluster(t *testing.T) {
 clusterUtil: stubClusterUtil{},
 helmClient: stubHelmClient{servicesError: someErr},
 kubeconfigReader: &stubKubeconfigReader{
-Kubeconfig: []byte("someKubeconfig"),
+kubeconfig: []byte("someKubeconfig"),
 },
 kubeAPIWaiter: stubKubeAPIWaiter{},
 providerMetadata: &stubProviderMetadata{},

@@ -169,7 +169,7 @@ func TestInitCluster(t *testing.T) {
 "kubeadm init fails when reading kubeconfig": {
 clusterUtil: stubClusterUtil{},
 kubeconfigReader: &stubKubeconfigReader{
-ReadErr: someErr,
+readErr: someErr,
 },
 kubeAPIWaiter: stubKubeAPIWaiter{},
 providerMetadata: &stubProviderMetadata{},

@@ -179,7 +179,7 @@ func TestInitCluster(t *testing.T) {
 "kubeadm init fails when setting up konnectivity": {
 clusterUtil: stubClusterUtil{setupKonnectivityError: someErr},
 kubeconfigReader: &stubKubeconfigReader{
-Kubeconfig: []byte("someKubeconfig"),
+kubeconfig: []byte("someKubeconfig"),
 },
 kubeAPIWaiter: stubKubeAPIWaiter{},
 providerMetadata: &stubProviderMetadata{},

@@ -189,7 +189,7 @@ func TestInitCluster(t *testing.T) {
 "kubeadm init fails when setting up verification service": {
 clusterUtil: stubClusterUtil{setupVerificationServiceErr: someErr},
 kubeconfigReader: &stubKubeconfigReader{
-Kubeconfig: []byte("someKubeconfig"),
+kubeconfig: []byte("someKubeconfig"),
 },
 kubeAPIWaiter: stubKubeAPIWaiter{},
 providerMetadata: &stubProviderMetadata{},

@@ -199,7 +199,7 @@ func TestInitCluster(t *testing.T) {
 "kubeadm init fails when waiting for kubeAPI server": {
 clusterUtil: stubClusterUtil{},
 kubeconfigReader: &stubKubeconfigReader{
-Kubeconfig: []byte("someKubeconfig"),
+kubeconfig: []byte("someKubeconfig"),
 },
 kubeAPIWaiter: stubKubeAPIWaiter{waitErr: someErr},
 providerMetadata: &stubProviderMetadata{},

@@ -209,7 +209,7 @@ func TestInitCluster(t *testing.T) {
 "unsupported k8sVersion fails cluster creation": {
 clusterUtil: stubClusterUtil{},
 kubeconfigReader: &stubKubeconfigReader{
-Kubeconfig: []byte("someKubeconfig"),
+kubeconfig: []byte("someKubeconfig"),
 },
 kubeAPIWaiter: stubKubeAPIWaiter{},
 providerMetadata: &stubProviderMetadata{},

@@ -228,7 +228,7 @@ func TestInitCluster(t *testing.T) {
 helmClient: &tc.helmClient,
 providerMetadata: tc.providerMetadata,
 kubeAPIWaiter: &tc.kubeAPIWaiter,
-configProvider: &stubConfigProvider{InitConfig: k8sapi.KubeadmInitYAML{}},
+configProvider: &stubConfigProvider{initConfig: k8sapi.KubeadmInitYAML{}},
 client: &tc.kubectl,
 kubeconfigReader: tc.kubeconfigReader,
 getIPAddr: func() (string, error) { return privateIP, nil },
@@ -274,7 +274,7 @@ func TestJoinCluster(t *testing.T) {
 "kubeadm join worker works with metadata": {
 clusterUtil: stubClusterUtil{},
 providerMetadata: &stubProviderMetadata{
-SelfResp: metadata.InstanceMetadata{
+selfResp: metadata.InstanceMetadata{
 ProviderID: "provider-id",
 Name: "metadata-name",
 VPCIP: "192.0.2.1",

@@ -294,7 +294,7 @@ func TestJoinCluster(t *testing.T) {
 "kubeadm join worker works with metadata and cloud controller manager": {
 clusterUtil: stubClusterUtil{},
 providerMetadata: &stubProviderMetadata{
-SelfResp: metadata.InstanceMetadata{
+selfResp: metadata.InstanceMetadata{
 ProviderID: "provider-id",
 Name: "metadata-name",
 VPCIP: "192.0.2.1",

@@ -314,7 +314,7 @@ func TestJoinCluster(t *testing.T) {
 "kubeadm join control-plane node works with metadata": {
 clusterUtil: stubClusterUtil{},
 providerMetadata: &stubProviderMetadata{
-SelfResp: metadata.InstanceMetadata{
+selfResp: metadata.InstanceMetadata{
 ProviderID: "provider-id",
 Name: "metadata-name",
 VPCIP: "192.0.2.1",

@@ -341,7 +341,7 @@ func TestJoinCluster(t *testing.T) {
 "kubeadm join worker fails when retrieving self metadata": {
 clusterUtil: stubClusterUtil{},
 providerMetadata: &stubProviderMetadata{
-SelfErr: someErr,
+selfErr: someErr,
 },
 role: role.Worker,
 wantErr: true,

@@ -374,8 +374,7 @@ func TestJoinCluster(t *testing.T) {
 require.NoError(err)

 var joinYaml k8sapi.KubeadmJoinYAML
-joinYaml, err = joinYaml.Unmarshal(tc.clusterUtil.joinConfigs[0])
-require.NoError(err)
+require.NoError(kubernetes.UnmarshalK8SResources(tc.clusterUtil.joinConfigs[0], &joinYaml))

 assert.Equal(tc.wantConfig, joinYaml.JoinConfiguration)
 })
@@ -481,30 +480,30 @@ func (s *stubClusterUtil) FixCilium(log *logger.Logger) {
 }

 type stubConfigProvider struct {
-InitConfig k8sapi.KubeadmInitYAML
-JoinConfig k8sapi.KubeadmJoinYAML
+initConfig k8sapi.KubeadmInitYAML
+joinConfig k8sapi.KubeadmJoinYAML
 }

 func (s *stubConfigProvider) InitConfiguration(_ bool, _ versions.ValidK8sVersion) k8sapi.KubeadmInitYAML {
-return s.InitConfig
+return s.initConfig
 }

 func (s *stubConfigProvider) JoinConfiguration(_ bool) k8sapi.KubeadmJoinYAML {
-s.JoinConfig = k8sapi.KubeadmJoinYAML{
+s.joinConfig = k8sapi.KubeadmJoinYAML{
 JoinConfiguration: kubeadm.JoinConfiguration{
 Discovery: kubeadm.Discovery{
 BootstrapToken: &kubeadm.BootstrapTokenDiscovery{},
 },
 },
 }
-return s.JoinConfig
+return s.joinConfig
 }

 type stubKubectl struct {
-ApplyErr error
+applyErr error
 createConfigMapErr error
-AddTolerationsToDeploymentErr error
-AddTNodeSelectorsToDeploymentErr error
+addTolerationsToDeploymentErr error
+addTNodeSelectorsToDeploymentErr error
 waitForCRDsErr error
 listAllNamespacesErr error

@@ -515,7 +514,7 @@ type stubKubectl struct {

 func (s *stubKubectl) Apply(resources kubernetes.Marshaler, forceConflicts bool) error {
 s.resources = append(s.resources, resources)
-return s.ApplyErr
+return s.applyErr
 }

 func (s *stubKubectl) SetKubeconfig(kubeconfig []byte) {

@@ -527,11 +526,11 @@ func (s *stubKubectl) CreateConfigMap(ctx context.Context, configMap corev1.Conf
 }

 func (s *stubKubectl) AddTolerationsToDeployment(ctx context.Context, tolerations []corev1.Toleration, name string, namespace string) error {
-return s.AddTolerationsToDeploymentErr
+return s.addTolerationsToDeploymentErr
 }

 func (s *stubKubectl) AddNodeSelectorsToDeployment(ctx context.Context, selectors map[string]string, name string, namespace string) error {
-return s.AddTNodeSelectorsToDeploymentErr
+return s.addTNodeSelectorsToDeploymentErr
 }

 func (s *stubKubectl) WaitForCRDs(ctx context.Context, crds []string) error {

@@ -543,12 +542,12 @@ func (s *stubKubectl) ListAllNamespaces(ctx context.Context) (*corev1.NamespaceL
 }

 type stubKubeconfigReader struct {
-Kubeconfig []byte
-ReadErr error
+kubeconfig []byte
+readErr error
 }

 func (s *stubKubeconfigReader) ReadKubeconfig() ([]byte, error) {
-return s.Kubeconfig, s.ReadErr
+return s.kubeconfig, s.readErr
 }

 type stubHelmClient struct {
@@ -80,7 +80,7 @@ func main() {
 fetcher = cloudprovider.New(meta)

 case platform.QEMU:
-fetcher = cloudprovider.New(&qemucloud.Metadata{})
+fetcher = cloudprovider.New(qemucloud.New())

 default:
 log.Errorf("Unknown / unimplemented cloud provider CONSTEL_CSP=%v. Using fallback", csp)

@@ -170,8 +170,6 @@ type stubMetadata struct {
 listErr error
 selfRes metadata.InstanceMetadata
 selfErr error
-getInstanceRes metadata.InstanceMetadata
-getInstanceErr error
 getLBEndpointRes string
 getLBEndpointErr error
 }

@@ -184,10 +182,6 @@ func (m *stubMetadata) Self(ctx context.Context) (metadata.InstanceMetadata, err
 return m.selfRes, m.selfErr
 }

-func (m *stubMetadata) GetInstance(ctx context.Context, providerID string) (metadata.InstanceMetadata, error) {
-return m.getInstanceRes, m.getInstanceErr
-}
-
 func (m *stubMetadata) GetLoadBalancerEndpoint(ctx context.Context) (string, error) {
 return m.getLBEndpointRes, m.getLBEndpointErr
 }
@@ -109,7 +109,7 @@ func main() {
 case cloudprovider.QEMU:
 diskPath = qemuStateDiskPath
 issuer = qemu.NewIssuer()
-metadataAPI = &qemucloud.Metadata{}
+metadataAPI = qemucloud.New()
 _ = exportPCRs()

 default:
go.mod (2 changes)
@@ -63,7 +63,6 @@ require (
 github.com/go-playground/validator/v10 v10.11.1
 github.com/google/go-tpm v0.3.3
 github.com/google/go-tpm-tools v0.3.9
-github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
 github.com/google/tink/go v1.7.0
 github.com/googleapis/gax-go/v2 v2.7.0
 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0

@@ -117,6 +116,7 @@ require (
 github.com/Azure/go-autorest v14.2.0+incompatible // indirect
 github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
 github.com/google/logger v1.1.1 // indirect
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 github.com/hashicorp/go-retryablehttp v0.7.1 // indirect
 golang.org/x/text v0.4.0 // indirect
 )
@@ -47,8 +47,8 @@ type imdsAPI interface {
 GetMetadata(context.Context, *imds.GetMetadataInput, ...func(*imds.Options)) (*imds.GetMetadataOutput, error)
 }

-// Metadata implements core.ProviderMetadata interface for AWS.
-type Metadata struct {
+// Cloud provides AWS metadata and API access.
+type Cloud struct {
 ec2 ec2API
 imds imdsAPI
 loadbalancer loadbalancerAPI

@@ -57,12 +57,12 @@ type Metadata struct {

 // New initializes a new AWS Metadata client using instance default credentials.
 // Default region is set up using the AWS imds api.
-func New(ctx context.Context) (*Metadata, error) {
+func New(ctx context.Context) (*Cloud, error) {
 cfg, err := config.LoadDefaultConfig(ctx, config.WithEC2IMDSRegion())
 if err != nil {
 return nil, err
 }
-return &Metadata{
+return &Cloud{
 ec2: ec2.NewFromConfig(cfg),
 imds: imds.New(imds.Options{}),
 loadbalancer: elasticloadbalancingv2.NewFromConfig(cfg),

@@ -71,27 +71,27 @@ func New(ctx context.Context) (*Metadata, error) {
 }

 // List retrieves all instances belonging to the current Constellation.
-func (m *Metadata) List(ctx context.Context) ([]metadata.InstanceMetadata, error) {
-uid, err := readInstanceTag(ctx, m.imds, cloud.TagUID)
+func (c *Cloud) List(ctx context.Context) ([]metadata.InstanceMetadata, error) {
+uid, err := readInstanceTag(ctx, c.imds, cloud.TagUID)
 if err != nil {
 return nil, fmt.Errorf("retrieving uid tag: %w", err)
 }
-ec2Instances, err := m.getAllInstancesInGroup(ctx, uid)
+ec2Instances, err := c.getAllInstancesInGroup(ctx, uid)
 if err != nil {
 return nil, fmt.Errorf("retrieving instances: %w", err)
 }

-return m.convertToMetadataInstance(ec2Instances)
+return c.convertToMetadataInstance(ec2Instances)
 }

 // Self retrieves the current instance.
-func (m *Metadata) Self(ctx context.Context) (metadata.InstanceMetadata, error) {
-identity, err := m.imds.GetInstanceIdentityDocument(ctx, &imds.GetInstanceIdentityDocumentInput{})
+func (c *Cloud) Self(ctx context.Context) (metadata.InstanceMetadata, error) {
+identity, err := c.imds.GetInstanceIdentityDocument(ctx, &imds.GetInstanceIdentityDocumentInput{})
 if err != nil {
 return metadata.InstanceMetadata{}, fmt.Errorf("retrieving instance identity: %w", err)
 }

-instanceRole, err := readInstanceTag(ctx, m.imds, cloud.TagRole)
+instanceRole, err := readInstanceTag(ctx, c.imds, cloud.TagRole)
 if err != nil {
 return metadata.InstanceMetadata{}, fmt.Errorf("retrieving role tag: %w", err)
 }
@@ -104,46 +104,18 @@ func (m *Metadata) Self(ctx context.Context) (metadata.InstanceMetadata, error)
 }, nil
 }

-// GetInstance retrieves the instance with the given providerID.
-func (m *Metadata) GetInstance(ctx context.Context, providerID string) (metadata.InstanceMetadata, error) {
-instances, err := m.ec2.DescribeInstances(ctx, &ec2.DescribeInstancesInput{
-InstanceIds: []string{providerID},
-})
-if err != nil {
-return metadata.InstanceMetadata{}, fmt.Errorf("retrieving instance: %w", err)
-}
-if len(instances.Reservations) == 0 {
-return metadata.InstanceMetadata{}, errors.New("instance not found")
-}
-if len(instances.Reservations) > 1 {
-return metadata.InstanceMetadata{}, errors.New("providerID matches multiple instances")
-}
-if len(instances.Reservations[0].Instances) == 0 {
-return metadata.InstanceMetadata{}, errors.New("instance not found")
-}
-if len(instances.Reservations[0].Instances) > 1 {
-return metadata.InstanceMetadata{}, errors.New("providerID matches multiple instances")
-}
-instance, err := m.convertToMetadataInstance(instances.Reservations[0].Instances)
-if err != nil {
-return metadata.InstanceMetadata{}, fmt.Errorf("converting instance: %w", err)
-}
-
-return instance[0], nil
-}
-
 // UID returns the UID of the Constellation.
-func (m *Metadata) UID(ctx context.Context) (string, error) {
-return readInstanceTag(ctx, m.imds, cloud.TagUID)
+func (c *Cloud) UID(ctx context.Context) (string, error) {
+return readInstanceTag(ctx, c.imds, cloud.TagUID)
 }

 // GetLoadBalancerEndpoint returns the endpoint of the load balancer.
-func (m *Metadata) GetLoadBalancerEndpoint(ctx context.Context) (string, error) {
-uid, err := readInstanceTag(ctx, m.imds, cloud.TagUID)
+func (c *Cloud) GetLoadBalancerEndpoint(ctx context.Context) (string, error) {
+uid, err := readInstanceTag(ctx, c.imds, cloud.TagUID)
 if err != nil {
 return "", fmt.Errorf("retrieving uid tag: %w", err)
 }
-arns, err := m.getARNsByTag(ctx, uid, "elasticloadbalancing:loadbalancer")
+arns, err := c.getARNsByTag(ctx, uid, "elasticloadbalancing:loadbalancer")
 if err != nil {
 return "", fmt.Errorf("retrieving load balancer ARNs: %w", err)
 }
@@ -151,7 +123,7 @@ func (m *Metadata) GetLoadBalancerEndpoint(ctx context.Context) (string, error)
 return "", fmt.Errorf("%d load balancers found", len(arns))
 }

-output, err := m.loadbalancer.DescribeLoadBalancers(ctx, &elasticloadbalancingv2.DescribeLoadBalancersInput{
+output, err := c.loadbalancer.DescribeLoadBalancers(ctx, &elasticloadbalancingv2.DescribeLoadBalancersInput{
 LoadBalancerArns: arns,
 })
 if err != nil {

@@ -175,7 +147,7 @@ func (m *Metadata) GetLoadBalancerEndpoint(ctx context.Context) (string, error)
 }

 // getARNsByTag returns a list of ARNs that have the given tag.
-func (m *Metadata) getARNsByTag(ctx context.Context, uid, resourceType string) ([]string, error) {
+func (c *Cloud) getARNsByTag(ctx context.Context, uid, resourceType string) ([]string, error) {
 var ARNs []string
 resourcesReq := &resourcegroupstaggingapi.GetResourcesInput{
 TagFilters: []tagType.TagFilter{

@@ -187,7 +159,7 @@ func (m *Metadata) getARNsByTag(ctx context.Context, uid, resourceType string) (
 ResourceTypeFilters: []string{resourceType},
 }

-for out, err := m.resourceapiClient.GetResources(ctx, resourcesReq); ; out, err = m.resourceapiClient.GetResources(ctx, resourcesReq) {
+for out, err := c.resourceapiClient.GetResources(ctx, resourcesReq); ; out, err = c.resourceapiClient.GetResources(ctx, resourcesReq) {
 if err != nil {
 return nil, fmt.Errorf("retrieving resources: %w", err)
 }

@@ -205,7 +177,7 @@ func (m *Metadata) getARNsByTag(ctx context.Context, uid, resourceType string) (
 }
 }

-func (m *Metadata) getAllInstancesInGroup(ctx context.Context, uid string) ([]ec2Types.Instance, error) {
+func (c *Cloud) getAllInstancesInGroup(ctx context.Context, uid string) ([]ec2Types.Instance, error) {
 var instances []ec2Types.Instance
 instanceReq := &ec2.DescribeInstancesInput{
 Filters: []ec2Types.Filter{

@@ -216,7 +188,7 @@ func (m *Metadata) getAllInstancesInGroup(ctx context.Context, uid string) ([]ec
 },
 }

-for out, err := m.ec2.DescribeInstances(ctx, instanceReq); ; out, err = m.ec2.DescribeInstances(ctx, instanceReq) {
+for out, err := c.ec2.DescribeInstances(ctx, instanceReq); ; out, err = c.ec2.DescribeInstances(ctx, instanceReq) {
 if err != nil {
 return nil, fmt.Errorf("retrieving instances: %w", err)
 }

@@ -232,7 +204,7 @@ func (m *Metadata) getAllInstancesInGroup(ctx context.Context, uid string) ([]ec
 }
 }

-func (m *Metadata) convertToMetadataInstance(ec2Instances []ec2Types.Instance) ([]metadata.InstanceMetadata, error) {
+func (c *Cloud) convertToMetadataInstance(ec2Instances []ec2Types.Instance) ([]metadata.InstanceMetadata, error) {
 var instances []metadata.InstanceMetadata
 for _, ec2Instance := range ec2Instances {
 // ignore not running instances
@@ -120,7 +120,7 @@ func TestSelf(t *testing.T) {
 for name, tc := range testCases {
 t.Run(name, func(t *testing.T) {
 assert := assert.New(t)
-m := &Metadata{imds: tc.imds, ec2: &stubEC2{}}
+m := &Cloud{imds: tc.imds, ec2: &stubEC2{}}

 self, err := m.Self(context.Background())
 if tc.wantErr {

@@ -304,7 +304,7 @@ func TestList(t *testing.T) {
 for name, tc := range testCases {
 t.Run(name, func(t *testing.T) {
 assert := assert.New(t)
-m := &Metadata{ec2: tc.ec2, imds: tc.imds}
+m := &Cloud{ec2: tc.ec2, imds: tc.imds}

 list, err := m.List(context.Background())
 if tc.wantErr {

@@ -501,7 +501,7 @@ func TestGetLoadBalancerEndpoint(t *testing.T) {
 for name, tc := range testCases {
 t.Run(name, func(t *testing.T) {
 assert := assert.New(t)
-m := &Metadata{
+m := &Cloud{
 imds: tc.imds,
 loadbalancer: tc.loadbalancer,
 resourceapiClient: tc.resourceapi,

@@ -680,7 +680,7 @@ func TestConvertToMetadataInstance(t *testing.T) {
 for name, tc := range testCases {
 t.Run(name, func(t *testing.T) {
 assert := assert.New(t)
-m := &Metadata{}
+m := &Cloud{}

 instances, err := m.convertToMetadataInstance(tc.in)
 if tc.wantErr {
@@ -93,11 +93,6 @@ func New(ctx context.Context) (*Cloud, error) {
 	}, nil
 }
 
-// GetInstance retrieves an instance using its providerID.
-func (c *Cloud) GetInstance(ctx context.Context, providerID string) (metadata.InstanceMetadata, error) {
-	return c.getInstance(ctx, providerID)
-}
-
 // GetCCMConfig returns the configuration needed for the Kubernetes Cloud Controller Manager on Azure.
 func (c *Cloud) GetCCMConfig(ctx context.Context, providerID string, cloudServiceAccountURI string) ([]byte, error) {
 	subscriptionID, resourceGroup, err := azureshared.BasicsFromProviderID(providerID)
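GetCCMConfig derives the subscription ID and resource group from the node's providerID via azureshared.BasicsFromProviderID. A rough, hypothetical stand-in for that parsing step, assuming the resource-ID shape that appears in the test fixtures below ("/subscriptions/<id>/resourceGroups/<group>/providers/..."); the real azureshared helper may behave differently:

package main

import (
	"fmt"
	"strings"
)

// basicsFromProviderID illustrates extracting the subscription ID and resource
// group from an Azure resource ID. It is not the azureshared implementation.
func basicsFromProviderID(providerID string) (subscriptionID, resourceGroup string, err error) {
	parts := strings.Split(strings.TrimPrefix(providerID, "/"), "/")
	// Expected shape: subscriptions/<id>/resourceGroups/<group>/providers/...
	if len(parts) < 4 || !strings.EqualFold(parts[0], "subscriptions") || !strings.EqualFold(parts[2], "resourceGroups") {
		return "", "", fmt.Errorf("unexpected providerID format: %q", providerID)
	}
	return parts[1], parts[3], nil
}

func main() {
	sub, rg, err := basicsFromProviderID("/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id")
	fmt.Println(sub, rg, err) // subscription-id resource-group <nil>
}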
@@ -445,60 +445,6 @@ func TestGetInstance(t *testing.T) {
 	}
 }
 
-func TestSelfGetInstance(t *testing.T) {
-	assert := assert.New(t)
-	require := require.New(t)
-
-	cloud := &Cloud{
-		scaleSetsVMAPI: &stubVirtualMachineScaleSetVMsAPI{
-			getVM: armcomputev2.VirtualMachineScaleSetVM{
-				Name: to.Ptr("scale-set-name-instance-id"),
-				ID:   to.Ptr("/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id"),
-				Properties: &armcomputev2.VirtualMachineScaleSetVMProperties{
-					OSProfile: &armcomputev2.OSProfile{
-						ComputerName: to.Ptr("scale-set-name-instance-id"),
-					},
-					NetworkProfile: &armcomputev2.NetworkProfile{
-						NetworkInterfaces: []*armcomputev2.NetworkInterfaceReference{
-							{
-								ID: to.Ptr("/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Network/networkInterfaces/nic-name"),
-							},
-						},
-					},
-				},
-				Tags: map[string]*string{
-					cloud.TagRole: to.Ptr(role.Worker.String()),
-				},
-			},
-		},
-		netIfacAPI: &stubNetworkInterfacesAPI{
-			getInterface: armnetwork.Interface{
-				Properties: &armnetwork.InterfacePropertiesFormat{
-					IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
-						{
-							Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
-								Primary:          to.Ptr(true),
-								PrivateIPAddress: to.Ptr("192.0.2.1"),
-							},
-						},
-					},
-				},
-			},
-		},
-		imds: &stubIMDSAPI{
-			providerIDVal: "/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
-		},
-	}
-
-	self, err := cloud.Self(context.Background())
-	require.NoError(err)
-
-	instance, err := cloud.GetInstance(context.Background(), self.ProviderID)
-	require.NoError(err)
-
-	assert.Equal(self, instance)
-}
-
 func TestUID(t *testing.T) {
 	testCases := map[string]struct {
 		imdsAPI *stubIMDSAPI
@@ -80,16 +80,6 @@ func (c *Cloud) Close() {
 	}
 }
 
-// GetInstance retrieves an instance using its providerID.
-func (c *Cloud) GetInstance(ctx context.Context, providerID string) (metadata.InstanceMetadata, error) {
-	project, zone, instanceName, err := gcpshared.SplitProviderID(providerID)
-	if err != nil {
-		return metadata.InstanceMetadata{}, fmt.Errorf("invalid providerID: %w", err)
-	}
-
-	return c.getInstance(ctx, project, zone, instanceName)
-}
-
 // GetLoadBalancerEndpoint returns the endpoint of the load balancer.
 func (c *Cloud) GetLoadBalancerEndpoint(ctx context.Context) (string, error) {
 	project, zone, instanceName, err := c.retrieveInstanceInfo()
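The removed method relied on gcpshared.SplitProviderID to break a providerID into project, zone, and instance name. A hypothetical sketch of that kind of split, assuming the usual GCE providerID shape "gce://<project>/<zone>/<instance>"; it is not the gcpshared implementation:

package main

import (
	"fmt"
	"strings"
)

// splitProviderID illustrates parsing a GCE-style providerID into its
// components; the real gcpshared helper may differ.
func splitProviderID(providerID string) (project, zone, instanceName string, err error) {
	trimmed := strings.TrimPrefix(providerID, "gce://")
	parts := strings.Split(trimmed, "/")
	if trimmed == providerID || len(parts) != 3 {
		return "", "", "", fmt.Errorf("invalid providerID: %q", providerID)
	}
	return parts[0], parts[1], parts[2], nil
}

func main() {
	// Names taken from the test fixtures removed below.
	fmt.Println(splitProviderID("gce://someProject/someZone-west3-b/someInstance"))
}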
@@ -748,58 +748,6 @@ func TestUID(t *testing.T) {
 	}
 }
 
-func TestSelfGetInstance(t *testing.T) {
-	assert := assert.New(t)
-	require := require.New(t)
-
-	cloud := &Cloud{
-		imds: &stubIMDS{
-			projectID:    "someProject",
-			zone:         "someZone-west3-b",
-			instanceName: "someInstance",
-		},
-		instanceAPI: &stubInstanceAPI{
-			instance: &computepb.Instance{
-				Name: proto.String("someInstance"),
-				Zone: proto.String("someZone-west3-b"),
-				Labels: map[string]string{
-					cloud.TagUID:  "1234",
-					cloud.TagRole: role.ControlPlane.String(),
-				},
-				NetworkInterfaces: []*computepb.NetworkInterface{
-					{
-						Name:      proto.String("nic0"),
-						NetworkIP: proto.String("192.0.2.0"),
-						AliasIpRanges: []*computepb.AliasIpRange{
-							{
-								IpCidrRange: proto.String("192.0.3.0/8"),
-							},
-						},
-						Subnetwork: proto.String("projects/someProject/regions/someRegion/subnetworks/someSubnetwork"),
-					},
-				},
-			},
-		},
-		subnetAPI: &stubSubnetAPI{
-			subnet: &computepb.Subnetwork{
-				SecondaryIpRanges: []*computepb.SubnetworkSecondaryRange{
-					{
-						IpCidrRange: proto.String("198.51.100.0/24"),
-					},
-				},
-			},
-		},
-	}
-
-	self, err := cloud.Self(context.Background())
-	require.NoError(err)
-
-	instance, err := cloud.GetInstance(context.Background(), self.ProviderID)
-	require.NoError(err)
-
-	assert.Equal(self, instance)
-}
-
 type stubForwardingRulesAPI struct {
 	iterator forwardingRuleIterator
 }
@@ -9,7 +9,6 @@ package qemu
 import (
 	"context"
 	"encoding/json"
-	"errors"
 	"io"
 	"net/http"
 	"net/url"
@@ -19,12 +18,17 @@ import (
 
 const qemuMetadataEndpoint = "10.42.0.1:8080"
 
-// Metadata implements core.ProviderMetadata interface for QEMU.
-type Metadata struct{}
+// Cloud provides an interface to fake a CSP API for QEMU instances.
+type Cloud struct{}
+
+// New returns a new Cloud instance.
+func New() *Cloud {
+	return &Cloud{}
+}
 
 // List retrieves all instances belonging to the current constellation.
-func (m *Metadata) List(ctx context.Context) ([]metadata.InstanceMetadata, error) {
-	instancesRaw, err := m.retrieveMetadata(ctx, "/peers")
+func (c *Cloud) List(ctx context.Context) ([]metadata.InstanceMetadata, error) {
+	instancesRaw, err := c.retrieveMetadata(ctx, "/peers")
 	if err != nil {
 		return nil, err
 	}
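A minimal usage sketch of the renamed QEMU client, exercising only methods visible in this diff (New, Self, GetLoadBalancerEndpoint). The import path and the self.Name field are assumptions; ProviderID appears in the code removed further below:

package main

import (
	"context"
	"fmt"
	"log"

	qemucloud "github.com/edgelesssys/constellation/v2/internal/cloud/qemu" // assumed path
)

func main() {
	cloud := qemucloud.New()
	ctx := context.Background()

	// Query the fake metadata service for the current instance.
	self, err := cloud.Self(ctx)
	if err != nil {
		log.Fatalf("retrieving self: %v", err)
	}
	fmt.Printf("running as %q (%s)\n", self.Name, self.ProviderID)

	// For QEMU, the load balancer is the first control-plane node.
	endpoint, err := cloud.GetLoadBalancerEndpoint(ctx)
	if err != nil {
		log.Fatalf("retrieving load balancer endpoint: %v", err)
	}
	fmt.Println("load balancer endpoint:", endpoint)
}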
@@ -35,8 +39,8 @@ func (m *Metadata) List(ctx context.Context) ([]metadata.InstanceMetadata, error
 }
 
 // Self retrieves the current instance.
-func (m *Metadata) Self(ctx context.Context) (metadata.InstanceMetadata, error) {
-	instanceRaw, err := m.retrieveMetadata(ctx, "/self")
+func (c *Cloud) Self(ctx context.Context) (metadata.InstanceMetadata, error) {
+	instanceRaw, err := c.retrieveMetadata(ctx, "/self")
 	if err != nil {
 		return metadata.InstanceMetadata{}, err
 	}
@@ -46,25 +50,10 @@ func (m *Metadata) Self(ctx context.Context) (metadata.InstanceMetadata, error)
 	return instance, err
 }
 
-// GetInstance retrieves an instance using its providerID.
-func (m *Metadata) GetInstance(ctx context.Context, providerID string) (metadata.InstanceMetadata, error) {
-	instances, err := m.List(ctx)
-	if err != nil {
-		return metadata.InstanceMetadata{}, err
-	}
-
-	for _, instance := range instances {
-		if instance.ProviderID == providerID {
-			return instance, nil
-		}
-	}
-	return metadata.InstanceMetadata{}, errors.New("instance not found")
-}
-
 // GetLoadBalancerEndpoint returns the endpoint of the load balancer.
 // For QEMU, the load balancer is the first control plane node returned by the metadata API.
-func (m *Metadata) GetLoadBalancerEndpoint(ctx context.Context) (string, error) {
-	endpointRaw, err := m.retrieveMetadata(ctx, "/endpoint")
+func (c *Cloud) GetLoadBalancerEndpoint(ctx context.Context) (string, error) {
+	endpointRaw, err := c.retrieveMetadata(ctx, "/endpoint")
 	if err != nil {
 		return "", err
 	}
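Callers that still need the removed by-providerID lookup can reproduce it on top of List, which stays in place. This sketch mirrors the deleted method body; the helper name, package clause, and metadata import path are chosen for illustration:

package qemu

import (
	"context"
	"errors"

	"github.com/edgelesssys/constellation/v2/internal/cloud/metadata" // assumed path
)

// findInstance mirrors the removed GetInstance: it scans the peer list for a
// matching providerID.
func findInstance(ctx context.Context, c *Cloud, providerID string) (metadata.InstanceMetadata, error) {
	instances, err := c.List(ctx)
	if err != nil {
		return metadata.InstanceMetadata{}, err
	}
	for _, instance := range instances {
		if instance.ProviderID == providerID {
			return instance, nil
		}
	}
	return metadata.InstanceMetadata{}, errors.New("instance not found")
}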
@@ -74,13 +63,13 @@ func (m *Metadata) GetLoadBalancerEndpoint(ctx context.Context) (string, error)
 }
 
 // UID returns the UID of the constellation.
-func (m *Metadata) UID(ctx context.Context) (string, error) {
+func (c *Cloud) UID(ctx context.Context) (string, error) {
 	// We expect only one constellation to be deployed in the same QEMU / libvirt environment.
 	// the UID can be an empty string.
 	return "", nil
 }
 
-func (m *Metadata) retrieveMetadata(ctx context.Context, uri string) ([]byte, error) {
+func (c *Cloud) retrieveMetadata(ctx context.Context, uri string) ([]byte, error) {
 	url := &url.URL{
 		Scheme: "http",
 		Host:   qemuMetadataEndpoint,
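The hunk cuts off inside retrieveMetadata. A plausible standalone reconstruction, assuming the helper issues a plain HTTP GET against the fake metadata service and returns the raw body; the actual implementation may differ:

package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"time"
)

const qemuMetadataEndpoint = "10.42.0.1:8080"

// retrieveMetadata is a hypothetical reconstruction of the truncated helper:
// build the URL, issue a GET, and return the raw response body.
func retrieveMetadata(ctx context.Context, uri string) ([]byte, error) {
	url := &url.URL{
		Scheme: "http",
		Host:   qemuMetadataEndpoint,
		Path:   uri,
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	body, err := retrieveMetadata(ctx, "/self")
	fmt.Println(string(body), err)
}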
@@ -135,7 +135,7 @@ func getVPCIP(ctx context.Context, provider string) (string, error) {
 		defer gcpMeta.Close()
 		metadata = gcpMeta
 	case cloudprovider.QEMU:
-		metadata = &qemucloud.Metadata{}
+		metadata = qemucloud.New()
 	default:
 		return "", errors.New("unsupported cloud provider")
 	}