Mirror of https://github.com/edgelesssys/constellation.git (synced 2024-12-24 23:19:39 -05:00)

commit 791d5564ba: replace flannel with cilium
parent: 7e1c898870
@ -34,6 +34,18 @@ type networkSecurityGroupsAPI interface {
		networkSecurityGroupsCreateOrUpdatePollerResponse, error)
}

type loadBalancersClientCreateOrUpdatePollerResponse interface {
	PollUntilDone(ctx context.Context, freq time.Duration) (armnetwork.LoadBalancersClientCreateOrUpdateResponse, error)
}

type loadBalancersAPI interface {
	BeginCreateOrUpdate(ctx context.Context, resourceGroupName string,
		loadBalancerName string, parameters armnetwork.LoadBalancer,
		options *armnetwork.LoadBalancersClientBeginCreateOrUpdateOptions) (
		loadBalancersClientCreateOrUpdatePollerResponse, error,
	)
}

type virtualMachineScaleSetsCreateOrUpdatePollerResponse interface {
	PollUntilDone(ctx context.Context, freq time.Duration) (armcompute.VirtualMachineScaleSetsClientCreateOrUpdateResponse, error)
}

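A hypothetical helper (not part of the diff) showing how the loadBalancersAPI interface above is meant to be consumed; the 30*time.Second polling frequency mirrors what this package uses elsewhere.

	func createLoadBalancerAndWait(ctx context.Context, api loadBalancersAPI, resourceGroup, name string,
		loadBalancer armnetwork.LoadBalancer,
	) (armnetwork.LoadBalancersClientCreateOrUpdateResponse, error) {
		// Kick off the create/update operation through the narrow interface.
		poller, err := api.BeginCreateOrUpdate(ctx, resourceGroup, name, loadBalancer, nil)
		if err != nil {
			return armnetwork.LoadBalancersClientCreateOrUpdateResponse{}, err
		}
		// Block until Azure reports the operation as finished.
		return poller.PollUntilDone(ctx, 30*time.Second)
	}
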
@ -48,6 +48,29 @@ func (a stubNetworksAPI) BeginCreateOrUpdate(ctx context.Context, resourceGroupN
|
||||
return a.stubResponse, a.createErr
|
||||
}
|
||||
|
||||
type stubLoadBalancersAPI struct {
|
||||
createErr error
|
||||
stubResponse stubLoadBalancersClientCreateOrUpdatePollerResponse
|
||||
}
|
||||
|
||||
type stubLoadBalancersClientCreateOrUpdatePollerResponse struct {
|
||||
pollResponse armnetwork.LoadBalancersClientCreateOrUpdateResponse
|
||||
pollErr error
|
||||
}
|
||||
|
||||
func (r stubLoadBalancersClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration,
|
||||
) (armnetwork.LoadBalancersClientCreateOrUpdateResponse, error) {
|
||||
return r.pollResponse, r.pollErr
|
||||
}
|
||||
|
||||
func (a stubLoadBalancersAPI) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string,
|
||||
loadBalancerName string, parameters armnetwork.LoadBalancer,
|
||||
options *armnetwork.LoadBalancersClientBeginCreateOrUpdateOptions) (
|
||||
loadBalancersClientCreateOrUpdatePollerResponse, error,
|
||||
) {
|
||||
return a.stubResponse, a.createErr
|
||||
}
|
||||
|
||||
type stubNetworkSecurityGroupsCreateOrUpdatePollerResponse struct {
|
||||
armnetwork.SecurityGroupsClientCreateOrUpdatePollerResponse
|
||||
pollerErr error
|
||||
@ -143,23 +166,17 @@ func (a stubScaleSetsAPI) BeginCreateOrUpdate(ctx context.Context, resourceGroup
|
||||
return a.stubResponse, a.createErr
|
||||
}
|
||||
|
||||
// TODO: deprecate as soon as scale sets are available.
|
||||
type stubPublicIPAddressesAPI struct {
|
||||
// TODO: deprecate as soon as scale sets are available.
|
||||
createErr error
|
||||
// TODO: deprecate as soon as scale sets are available.
|
||||
getErr error
|
||||
// TODO: deprecate as soon as scale sets are available.
|
||||
createErr error
|
||||
getErr error
|
||||
stubCreateResponse stubPublicIPAddressesClientCreateOrUpdatePollerResponse
|
||||
}
|
||||
|
||||
// TODO: deprecate as soon as scale sets are available.
|
||||
type stubPublicIPAddressesClientCreateOrUpdatePollerResponse struct {
|
||||
armnetwork.PublicIPAddressesClientCreateOrUpdatePollerResponse
|
||||
pollErr error
|
||||
}
|
||||
|
||||
// TODO: deprecate as soon as scale sets are available.
|
||||
func (r stubPublicIPAddressesClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (
|
||||
armnetwork.PublicIPAddressesClientCreateOrUpdateResponse, error,
|
||||
) {
|
||||
@ -167,6 +184,9 @@ func (r stubPublicIPAddressesClientCreateOrUpdatePollerResponse) PollUntilDone(c
|
||||
PublicIPAddressesClientCreateOrUpdateResult: armnetwork.PublicIPAddressesClientCreateOrUpdateResult{
|
||||
PublicIPAddress: armnetwork.PublicIPAddress{
|
||||
ID: to.StringPtr("pubIP-id"),
|
||||
Properties: &armnetwork.PublicIPAddressPropertiesFormat{
|
||||
IPAddress: to.StringPtr("192.0.2.1"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}, r.pollErr
|
||||
@ -206,7 +226,6 @@ func (a stubPublicIPAddressesAPI) ListVirtualMachineScaleSetVMPublicIPAddresses(
|
||||
return &stubPublicIPAddressesListVirtualMachineScaleSetVMPublicIPAddressesPager{pagesCounter: 0, PagesMax: 1}
|
||||
}
|
||||
|
||||
// TODO: deprecate as soon as scale sets are available.
|
||||
func (a stubPublicIPAddressesAPI) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, publicIPAddressName string,
|
||||
parameters armnetwork.PublicIPAddress, options *armnetwork.PublicIPAddressesClientBeginCreateOrUpdateOptions) (
|
||||
publicIPAddressesClientCreateOrUpdatePollerResponse, error,
|
||||
@ -214,7 +233,6 @@ func (a stubPublicIPAddressesAPI) BeginCreateOrUpdate(ctx context.Context, resou
|
||||
return a.stubCreateResponse, a.createErr
|
||||
}
|
||||
|
||||
// TODO: deprecate as soon as scale sets are available.
|
||||
func (a stubPublicIPAddressesAPI) Get(ctx context.Context, resourceGroupName string, publicIPAddressName string, options *armnetwork.PublicIPAddressesClientGetOptions) (
|
||||
armnetwork.PublicIPAddressesClientGetResponse, error,
|
||||
) {
|
||||
@ -230,11 +248,9 @@ func (a stubPublicIPAddressesAPI) Get(ctx context.Context, resourceGroupName str
|
||||
}
|
||||
|
||||
type stubNetworkInterfacesAPI struct {
|
||||
getErr error
|
||||
// TODO: deprecate as soon as scale sets are available
|
||||
getErr error
|
||||
createErr error
|
||||
// TODO: deprecate as soon as scale sets are available
|
||||
stubResp stubInterfacesClientCreateOrUpdatePollerResponse
|
||||
stubResp stubInterfacesClientCreateOrUpdatePollerResponse
|
||||
}
|
||||
|
||||
func (a stubNetworkInterfacesAPI) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string,
|
||||
|
@ -35,6 +35,17 @@ func (c *networkInterfacesClient) BeginCreateOrUpdate(ctx context.Context, resou
|
||||
return c.InterfacesClient.BeginCreateOrUpdate(ctx, resourceGroupName, networkInterfaceName, parameters, options)
|
||||
}
|
||||
|
||||
type loadBalancersClient struct {
|
||||
*armnetwork.LoadBalancersClient
|
||||
}
|
||||
|
||||
func (c *loadBalancersClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string,
|
||||
parameters armnetwork.LoadBalancer, options *armnetwork.LoadBalancersClientBeginCreateOrUpdateOptions) (
|
||||
loadBalancersClientCreateOrUpdatePollerResponse, error,
|
||||
) {
|
||||
return c.LoadBalancersClient.BeginCreateOrUpdate(ctx, resourceGroupName, loadBalancerName, parameters, options)
|
||||
}
|
||||
|
||||
type networkSecurityGroupsClient struct {
|
||||
*armnetwork.SecurityGroupsClient
|
||||
}
|
||||
|
@ -33,6 +33,7 @@ type Client struct {
|
||||
scaleSetsAPI
|
||||
publicIPAddressesAPI
|
||||
networkInterfacesAPI
|
||||
loadBalancersAPI
|
||||
virtualMachinesAPI
|
||||
applicationsAPI
|
||||
servicePrincipalsAPI
|
||||
@ -53,6 +54,8 @@ type Client struct {
|
||||
subnetID string
|
||||
coordinatorsScaleSet string
|
||||
nodesScaleSet string
|
||||
loadBalancerName string
|
||||
loadBalancerPubIP string
|
||||
networkSecurityGroup string
|
||||
adAppObjectID string
|
||||
}
|
||||
@ -77,6 +80,7 @@ func NewFromDefault(subscriptionID, tenantID string) (*Client, error) {
|
||||
scaleSetAPI := armcompute.NewVirtualMachineScaleSetsClient(subscriptionID, cred, nil)
|
||||
publicIPAddressesAPI := armnetwork.NewPublicIPAddressesClient(subscriptionID, cred, nil)
|
||||
networkInterfacesAPI := armnetwork.NewInterfacesClient(subscriptionID, cred, nil)
|
||||
loadBalancersAPI := armnetwork.NewLoadBalancersClient(subscriptionID, cred, nil)
|
||||
virtualMachinesAPI := armcompute.NewVirtualMachinesClient(subscriptionID, cred, nil)
|
||||
applicationsAPI := graphrbac.NewApplicationsClient(tenantID)
|
||||
applicationsAPI.Authorizer = graphAuthorizer
|
||||
@ -92,6 +96,7 @@ func NewFromDefault(subscriptionID, tenantID string) (*Client, error) {
|
||||
scaleSetsAPI: &virtualMachineScaleSetsClient{scaleSetAPI},
|
||||
publicIPAddressesAPI: &publicIPAddressesClient{publicIPAddressesAPI},
|
||||
networkInterfacesAPI: &networkInterfacesClient{networkInterfacesAPI},
|
||||
loadBalancersAPI: &loadBalancersClient{loadBalancersAPI},
|
||||
applicationsAPI: &applicationsClient{&applicationsAPI},
|
||||
servicePrincipalsAPI: &servicePrincipalsClient{&servicePrincipalsAPI},
|
||||
roleAssignmentsAPI: &roleAssignmentsClient{&roleAssignmentsAPI},
|
||||
@ -233,7 +238,7 @@ func (c *Client) SetState(stat state.ConstellationState) error {
|
||||
}
|
||||
c.coordinatorsScaleSet = stat.AzureCoordinatorsScaleSet
|
||||
if len(stat.AzureNodes) == 0 {
|
||||
return errors.New("state has no coordinator scale set")
|
||||
return errors.New("state has no nodes")
|
||||
}
|
||||
c.nodes = stat.AzureNodes
|
||||
if len(stat.AzureCoordinators) == 0 {
|
||||
|
@ -13,13 +13,14 @@ import (
|
||||
func (c *Client) CreateInstances(ctx context.Context, input CreateInstancesInput) error {
|
||||
// Create nodes scale set
|
||||
createNodesInput := CreateScaleSetInput{
|
||||
Name: "constellation-scale-set-nodes-" + c.uid,
|
||||
NamePrefix: c.name + "-worker-" + c.uid + "-",
|
||||
Count: input.CountNodes,
|
||||
InstanceType: input.InstanceType,
|
||||
StateDiskSizeGB: int32(input.StateDiskSizeGB),
|
||||
Image: input.Image,
|
||||
UserAssingedIdentity: input.UserAssingedIdentity,
|
||||
Name: "constellation-scale-set-nodes-" + c.uid,
|
||||
NamePrefix: c.name + "-worker-" + c.uid + "-",
|
||||
Count: input.CountNodes,
|
||||
InstanceType: input.InstanceType,
|
||||
StateDiskSizeGB: int32(input.StateDiskSizeGB),
|
||||
Image: input.Image,
|
||||
UserAssingedIdentity: input.UserAssingedIdentity,
|
||||
LoadBalancerBackendAddressPool: azure.BackendAddressPoolWorkerName + "-" + c.uid,
|
||||
}
|
||||
|
||||
if err := c.createScaleSet(ctx, createNodesInput); err != nil {
|
||||
@ -30,13 +31,14 @@ func (c *Client) CreateInstances(ctx context.Context, input CreateInstancesInput
|
||||
|
||||
// Create coordinator scale set
|
||||
createCoordinatorsInput := CreateScaleSetInput{
|
||||
Name: "constellation-scale-set-coordinators-" + c.uid,
|
||||
NamePrefix: c.name + "-control-plane-" + c.uid + "-",
|
||||
Count: input.CountCoordinators,
|
||||
InstanceType: input.InstanceType,
|
||||
StateDiskSizeGB: int32(input.StateDiskSizeGB),
|
||||
Image: input.Image,
|
||||
UserAssingedIdentity: input.UserAssingedIdentity,
|
||||
Name: "constellation-scale-set-coordinators-" + c.uid,
|
||||
NamePrefix: c.name + "-control-plane-" + c.uid + "-",
|
||||
Count: input.CountCoordinators,
|
||||
InstanceType: input.InstanceType,
|
||||
StateDiskSizeGB: int32(input.StateDiskSizeGB),
|
||||
Image: input.Image,
|
||||
UserAssingedIdentity: input.UserAssingedIdentity,
|
||||
LoadBalancerBackendAddressPool: azure.BackendAddressPoolControlPlaneName + "-" + c.uid,
|
||||
}
|
||||
|
||||
if err := c.createScaleSet(ctx, createCoordinatorsInput); err != nil {
|
||||
@ -58,6 +60,14 @@ func (c *Client) CreateInstances(ctx context.Context, input CreateInstancesInput
	}
	c.coordinators = instances

	// Set the load balancer public IP in the first coordinator
	coord, ok := c.coordinators["0"]
	if !ok {
		return errors.New("coordinator 0 not found")
	}
	coord.PublicIP = c.loadBalancerPubIP
	c.coordinators["0"] = coord

	return nil
}

||||
@ -119,13 +129,13 @@ func (c *Client) CreateInstancesVMs(ctx context.Context, input CreateInstancesIn
|
||||
// TODO: deprecate as soon as scale sets are available.
|
||||
func (c *Client) createInstanceVM(ctx context.Context, input azure.VMInstance) (azure.Instance, error) {
|
||||
pubIPName := input.Name + "-pubIP"
|
||||
pubIPID, err := c.createPublicIPAddress(ctx, pubIPName)
|
||||
pubIP, err := c.createPublicIPAddress(ctx, pubIPName)
|
||||
if err != nil {
|
||||
return azure.Instance{}, err
|
||||
}
|
||||
|
||||
nicName := input.Name + "-NIC"
|
||||
privIP, nicID, err := c.createNIC(ctx, nicName, pubIPID)
|
||||
privIP, nicID, err := c.createNIC(ctx, nicName, *pubIP.ID)
|
||||
if err != nil {
|
||||
return azure.Instance{}, err
|
||||
}
|
||||
@ -167,18 +177,22 @@ func (c *Client) createScaleSet(ctx context.Context, input CreateScaleSetInput)
|
||||
return err
|
||||
}
|
||||
scaleSet := azure.ScaleSet{
|
||||
Name: input.Name,
|
||||
NamePrefix: input.NamePrefix,
|
||||
Location: c.location,
|
||||
InstanceType: input.InstanceType,
|
||||
StateDiskSizeGB: input.StateDiskSizeGB,
|
||||
Count: int64(input.Count),
|
||||
Username: "constellation",
|
||||
SubnetID: c.subnetID,
|
||||
NetworkSecurityGroup: c.networkSecurityGroup,
|
||||
Image: input.Image,
|
||||
Password: pw,
|
||||
UserAssignedIdentity: input.UserAssingedIdentity,
|
||||
Name: input.Name,
|
||||
NamePrefix: input.NamePrefix,
|
||||
Location: c.location,
|
||||
InstanceType: input.InstanceType,
|
||||
StateDiskSizeGB: input.StateDiskSizeGB,
|
||||
Count: int64(input.Count),
|
||||
Username: "constellation",
|
||||
SubnetID: c.subnetID,
|
||||
NetworkSecurityGroup: c.networkSecurityGroup,
|
||||
Image: input.Image,
|
||||
Password: pw,
|
||||
UserAssignedIdentity: input.UserAssingedIdentity,
|
||||
Subscription: c.subscriptionID,
|
||||
ResourceGroup: c.resourceGroup,
|
||||
LoadBalancerName: c.loadBalancerName,
|
||||
LoadBalancerBackendAddressPool: input.LoadBalancerBackendAddressPool,
|
||||
}.Azure()
|
||||
|
||||
poller, err := c.scaleSetsAPI.BeginCreateOrUpdate(
|
||||
@ -242,13 +256,14 @@ func (c *Client) getInstanceIPs(ctx context.Context, scaleSet string, count int)
|
||||
|
||||
// CreateScaleSetInput is the input for a CreateScaleSet operation.
|
||||
type CreateScaleSetInput struct {
|
||||
Name string
|
||||
NamePrefix string
|
||||
Count int
|
||||
InstanceType string
|
||||
StateDiskSizeGB int32
|
||||
Image string
|
||||
UserAssingedIdentity string
|
||||
Name string
|
||||
NamePrefix string
|
||||
Count int
|
||||
InstanceType string
|
||||
StateDiskSizeGB int32
|
||||
Image string
|
||||
UserAssingedIdentity string
|
||||
LoadBalancerBackendAddressPool string
|
||||
}
|
||||
|
||||
// CreateResourceGroup creates a resource group.
|
||||
|
@ -216,6 +216,7 @@ func TestCreateInstances(t *testing.T) {
|
||||
roleAssignmentsAPI: tc.roleAssignmentsAPI,
|
||||
nodes: make(azure.Instances),
|
||||
coordinators: make(azure.Instances),
|
||||
loadBalancerPubIP: "lbip",
|
||||
}
|
||||
|
||||
if tc.wantErr {
|
||||
@ -227,7 +228,7 @@ func TestCreateInstances(t *testing.T) {
|
||||
assert.NotEmpty(client.nodes["0"].PrivateIP)
|
||||
assert.NotEmpty(client.nodes["0"].PublicIP)
|
||||
assert.NotEmpty(client.coordinators["0"].PrivateIP)
|
||||
assert.NotEmpty(client.coordinators["0"].PublicIP)
|
||||
assert.Equal("lbip", client.coordinators["0"].PublicIP)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -6,21 +6,34 @@ import (
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork"
|
||||
"github.com/edgelesssys/constellation/cli/azure"
|
||||
"github.com/edgelesssys/constellation/cli/cloud/cloudtypes"
|
||||
)
|
||||
|
||||
type createNetworkInput struct {
|
||||
name string
|
||||
location string
|
||||
addressSpace string
|
||||
name string
|
||||
location string
|
||||
addressSpace string
|
||||
nodeAddressSpace string
|
||||
podAddressSpace string
|
||||
}
|
||||
|
||||
const (
	nodeNetworkName     = "nodeNetwork"
	podNetworkName      = "podNetwork"
	networkAddressSpace = "10.0.0.0/8"
	nodeAddressSpace    = "10.9.0.0/16"
	podAddressSpace     = "10.10.0.0/16"
)
|
||||
|
||||
// CreateVirtualNetwork creates a virtual network.
|
||||
func (c *Client) CreateVirtualNetwork(ctx context.Context) error {
|
||||
createNetworkInput := createNetworkInput{
|
||||
name: "constellation-" + c.uid,
|
||||
location: c.location,
|
||||
addressSpace: "172.20.0.0/16",
|
||||
name: "constellation-" + c.uid,
|
||||
location: c.location,
|
||||
addressSpace: networkAddressSpace,
|
||||
nodeAddressSpace: nodeAddressSpace,
|
||||
podAddressSpace: podAddressSpace,
|
||||
}
|
||||
|
||||
poller, err := c.networksAPI.BeginCreateOrUpdate(
|
||||
@ -36,9 +49,15 @@ func (c *Client) CreateVirtualNetwork(ctx context.Context) error {
|
||||
},
|
||||
Subnets: []*armnetwork.Subnet{
|
||||
{
|
||||
Name: to.StringPtr("default"),
|
||||
Name: to.StringPtr(nodeNetworkName),
|
||||
Properties: &armnetwork.SubnetPropertiesFormat{
|
||||
AddressPrefix: to.StringPtr(createNetworkInput.addressSpace),
|
||||
AddressPrefix: to.StringPtr(createNetworkInput.nodeAddressSpace),
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: to.StringPtr(podNetworkName),
|
||||
Properties: &armnetwork.SubnetPropertiesFormat{
|
||||
AddressPrefix: to.StringPtr(createNetworkInput.podAddressSpace),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -141,25 +160,29 @@ func (c *Client) createNIC(ctx context.Context, name, publicIPAddressID string)
|
||||
nil
|
||||
}
|
||||
|
||||
// createPublicIPAddress creates a public IP address.
|
||||
// TODO: deprecate as soon as scale sets are available.
|
||||
func (c *Client) createPublicIPAddress(ctx context.Context, name string) (string, error) {
|
||||
func (c *Client) createPublicIPAddress(ctx context.Context, name string) (*armnetwork.PublicIPAddress, error) {
|
||||
poller, err := c.publicIPAddressesAPI.BeginCreateOrUpdate(
|
||||
ctx, c.resourceGroup, name,
|
||||
armnetwork.PublicIPAddress{
|
||||
Location: to.StringPtr(c.location),
|
||||
SKU: &armnetwork.PublicIPAddressSKU{
|
||||
Name: armnetwork.PublicIPAddressSKUNameStandard.ToPtr(),
|
||||
},
|
||||
Properties: &armnetwork.PublicIPAddressPropertiesFormat{
|
||||
PublicIPAllocationMethod: armnetwork.IPAllocationMethodStatic.ToPtr(),
|
||||
},
|
||||
},
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
pollerResp, err := poller.PollUntilDone(ctx, 30*time.Second)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return *pollerResp.PublicIPAddressesClientCreateOrUpdateResult.PublicIPAddress.ID, nil
|
||||
return &pollerResp.PublicIPAddressesClientCreateOrUpdateResult.PublicIPAddress, nil
|
||||
}
|
||||
|
||||
// NetworkSecurityGroupInput defines firewall rules to be set.
|
||||
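Before the next hunk, a brief hedged sketch of what the changed createPublicIPAddress signature above gives callers: the full PublicIPAddress object rather than only its ID string, so both the resource ID and the allocated address are available without a second Get call. Variable names follow this diff; error handling is elided.

	pubIP, err := c.createPublicIPAddress(ctx, "loadbalancer-public-ip-"+c.uid)
	if err != nil {
		return err
	}
	frontendIPID := *pubIP.ID                         // referenced by the load balancer frontend config
	c.loadBalancerPubIP = *pubIP.Properties.IPAddress // later copied onto coordinator "0"
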
@ -167,3 +190,42 @@ type NetworkSecurityGroupInput struct {
	Ingress cloudtypes.Firewall
	Egress  cloudtypes.Firewall
}

// CreateExternalLoadBalancer creates an external load balancer.
func (c *Client) CreateExternalLoadBalancer(ctx context.Context) error {
	// First, create a public IP address for the load balancer.
	publicIPAddress, err := c.createPublicIPAddress(ctx, "loadbalancer-public-ip-"+c.uid)
	if err != nil {
		return err
	}

	// Then, create the load balancer.
	loadBalancerName := "constellation-load-balancer-" + c.uid
	loadBalancer := azure.LoadBalancer{
		Name:          loadBalancerName,
		Location:      c.location,
		ResourceGroup: c.resourceGroup,
		Subscription:  c.subscriptionID,
		PublicIPID:    *publicIPAddress.ID,
		UID:           c.uid,
	}
	azureLoadBalancer := loadBalancer.Azure()

	poller, err := c.loadBalancersAPI.BeginCreateOrUpdate(
		ctx, c.resourceGroup, loadBalancerName,
		azureLoadBalancer,
		nil,
	)
	if err != nil {
		return err
	}

	_, err = poller.PollUntilDone(ctx, 30*time.Second)
	if err != nil {
		return err
	}
	c.loadBalancerName = loadBalancerName

	c.loadBalancerPubIP = *publicIPAddress.Properties.IPAddress
	return nil
}

@ -170,7 +170,6 @@ func TestCreateNIC(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: deprecate as soon as scale sets are available.
|
||||
func TestCreatePublicIPAddress(t *testing.T) {
|
||||
someErr := errors.New("failed")
|
||||
|
||||
@ -218,3 +217,58 @@ func TestCreatePublicIPAddress(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateExternalLoadBalancer(t *testing.T) {
|
||||
someErr := errors.New("failed")
|
||||
|
||||
testCases := map[string]struct {
|
||||
publicIPAddressesAPI publicIPAddressesAPI
|
||||
loadBalancersAPI loadBalancersAPI
|
||||
wantErr bool
|
||||
}{
|
||||
"successful create": {
|
||||
publicIPAddressesAPI: stubPublicIPAddressesAPI{stubCreateResponse: stubPublicIPAddressesClientCreateOrUpdatePollerResponse{}},
|
||||
loadBalancersAPI: stubLoadBalancersAPI{},
|
||||
},
|
||||
"failed to get response from successful create": {
|
||||
loadBalancersAPI: stubLoadBalancersAPI{stubResponse: stubLoadBalancersClientCreateOrUpdatePollerResponse{pollErr: someErr}},
|
||||
publicIPAddressesAPI: stubPublicIPAddressesAPI{},
|
||||
wantErr: true,
|
||||
},
|
||||
"failed create": {
|
||||
loadBalancersAPI: stubLoadBalancersAPI{createErr: someErr},
|
||||
publicIPAddressesAPI: stubPublicIPAddressesAPI{},
|
||||
wantErr: true,
|
||||
},
|
||||
"cannot create public IP": {
|
||||
publicIPAddressesAPI: stubPublicIPAddressesAPI{createErr: someErr},
|
||||
loadBalancersAPI: stubLoadBalancersAPI{},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
ctx := context.Background()
|
||||
client := Client{
|
||||
resourceGroup: "resource-group",
|
||||
location: "location",
|
||||
name: "name",
|
||||
uid: "uid",
|
||||
nodes: make(azure.Instances),
|
||||
coordinators: make(azure.Instances),
|
||||
loadBalancersAPI: tc.loadBalancersAPI,
|
||||
publicIPAddressesAPI: tc.publicIPAddressesAPI,
|
||||
}
|
||||
|
||||
err := client.CreateExternalLoadBalancer(ctx)
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
} else {
|
||||
assert.NoError(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
cli/azure/loadbalancer.go (new file, 162 lines)
@ -0,0 +1,162 @@
|
||||
package azure
|
||||
|
||||
import (
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork"
|
||||
"github.com/edgelesssys/constellation/internal/constants"
|
||||
)
|
||||
|
||||
// LoadBalancer defines a Azure load balancer.
|
||||
type LoadBalancer struct {
|
||||
Name string
|
||||
Subscription string
|
||||
ResourceGroup string
|
||||
Location string
|
||||
PublicIPID string
|
||||
UID string
|
||||
}
|
||||
|
||||
const (
|
||||
BackendAddressPoolWorkerName = "backendAddressWorkerPool"
|
||||
BackendAddressPoolControlPlaneName = "backendAddressControlPlanePool"
|
||||
)
|
||||
|
||||
// Azure returns a Azure representation of LoadBalancer.
|
||||
func (l LoadBalancer) Azure() armnetwork.LoadBalancer {
|
||||
frontEndIPConfigName := "frontEndIPConfig"
|
||||
kubeHealthProbeName := "kubeHealthProbe"
|
||||
coordHealthProbeName := "coordHealthProbe"
|
||||
debugdHealthProbeName := "debugdHealthProbe"
|
||||
backEndAddressPoolNodeName := BackendAddressPoolWorkerName + "-" + l.UID
|
||||
backEndAddressPoolControlPlaneName := BackendAddressPoolControlPlaneName + "-" + l.UID
|
||||
|
||||
return armnetwork.LoadBalancer{
|
||||
Name: to.StringPtr(l.Name),
|
||||
Location: to.StringPtr(l.Location),
|
||||
SKU: &armnetwork.LoadBalancerSKU{Name: armnetwork.LoadBalancerSKUNameStandard.ToPtr()},
|
||||
Properties: &armnetwork.LoadBalancerPropertiesFormat{
|
||||
FrontendIPConfigurations: []*armnetwork.FrontendIPConfiguration{
|
||||
{
|
||||
Name: to.StringPtr(frontEndIPConfigName),
|
||||
Properties: &armnetwork.FrontendIPConfigurationPropertiesFormat{
|
||||
PublicIPAddress: &armnetwork.PublicIPAddress{
|
||||
ID: to.StringPtr(l.PublicIPID),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
BackendAddressPools: []*armnetwork.BackendAddressPool{
|
||||
{
|
||||
Name: to.StringPtr(backEndAddressPoolNodeName),
|
||||
},
|
||||
{
|
||||
Name: to.StringPtr(backEndAddressPoolControlPlaneName),
|
||||
},
|
||||
{
|
||||
Name: to.StringPtr("all"),
|
||||
},
|
||||
},
|
||||
Probes: []*armnetwork.Probe{
|
||||
{
|
||||
Name: to.StringPtr(kubeHealthProbeName),
|
||||
Properties: &armnetwork.ProbePropertiesFormat{
|
||||
Protocol: armnetwork.ProbeProtocolTCP.ToPtr(),
|
||||
Port: to.Int32Ptr(int32(6443)),
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: to.StringPtr(coordHealthProbeName),
|
||||
Properties: &armnetwork.ProbePropertiesFormat{
|
||||
Protocol: armnetwork.ProbeProtocolTCP.ToPtr(),
|
||||
Port: to.Int32Ptr(int32(constants.CoordinatorPort)),
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: to.StringPtr(debugdHealthProbeName),
|
||||
Properties: &armnetwork.ProbePropertiesFormat{
|
||||
Protocol: armnetwork.ProbeProtocolTCP.ToPtr(),
|
||||
Port: to.Int32Ptr(int32(4000)),
|
||||
},
|
||||
},
|
||||
},
|
||||
LoadBalancingRules: []*armnetwork.LoadBalancingRule{
|
||||
{
|
||||
Name: to.StringPtr("kubeLoadBalancerRule"),
|
||||
Properties: &armnetwork.LoadBalancingRulePropertiesFormat{
|
||||
FrontendIPConfiguration: &armnetwork.SubResource{
|
||||
ID: to.StringPtr("/subscriptions/" + l.Subscription + "/resourceGroups/" + l.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + l.Name + "/frontendIPConfigurations/" + frontEndIPConfigName),
|
||||
},
|
||||
FrontendPort: to.Int32Ptr(int32(6443)),
|
||||
BackendPort: to.Int32Ptr(int32(6443)),
|
||||
Protocol: armnetwork.TransportProtocolTCP.ToPtr(),
|
||||
Probe: &armnetwork.SubResource{
|
||||
ID: to.StringPtr("/subscriptions/" + l.Subscription + "/resourceGroups/" + l.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + l.Name + "/probes/" + kubeHealthProbeName),
|
||||
},
|
||||
DisableOutboundSnat: to.BoolPtr(true),
|
||||
BackendAddressPools: []*armnetwork.SubResource{
|
||||
{
|
||||
ID: to.StringPtr("/subscriptions/" + l.Subscription + "/resourceGroups/" + l.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + l.Name + "/backendAddressPools/" + backEndAddressPoolControlPlaneName),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: to.StringPtr("coordLoadBalancerRule"),
|
||||
Properties: &armnetwork.LoadBalancingRulePropertiesFormat{
|
||||
FrontendIPConfiguration: &armnetwork.SubResource{
|
||||
ID: to.StringPtr("/subscriptions/" + l.Subscription + "/resourceGroups/" + l.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + l.Name + "/frontendIPConfigurations/" + frontEndIPConfigName),
|
||||
},
|
||||
FrontendPort: to.Int32Ptr(int32(constants.CoordinatorPort)),
|
||||
BackendPort: to.Int32Ptr(int32(constants.CoordinatorPort)),
|
||||
Protocol: armnetwork.TransportProtocolTCP.ToPtr(),
|
||||
Probe: &armnetwork.SubResource{
|
||||
ID: to.StringPtr("/subscriptions/" + l.Subscription + "/resourceGroups/" + l.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + l.Name + "/probes/" + coordHealthProbeName),
|
||||
},
|
||||
DisableOutboundSnat: to.BoolPtr(true),
|
||||
BackendAddressPools: []*armnetwork.SubResource{
|
||||
{
|
||||
ID: to.StringPtr("/subscriptions/" + l.Subscription + "/resourceGroups/" + l.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + l.Name + "/backendAddressPools/" + backEndAddressPoolControlPlaneName),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: to.StringPtr("debudLoadBalancerRule"),
|
||||
Properties: &armnetwork.LoadBalancingRulePropertiesFormat{
|
||||
FrontendIPConfiguration: &armnetwork.SubResource{
|
||||
ID: to.StringPtr("/subscriptions/" + l.Subscription + "/resourceGroups/" + l.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + l.Name + "/frontendIPConfigurations/" + frontEndIPConfigName),
|
||||
},
|
||||
FrontendPort: to.Int32Ptr(int32(4000)),
|
||||
BackendPort: to.Int32Ptr(int32(4000)),
|
||||
Protocol: armnetwork.TransportProtocolTCP.ToPtr(),
|
||||
Probe: &armnetwork.SubResource{
|
||||
ID: to.StringPtr("/subscriptions/" + l.Subscription + "/resourceGroups/" + l.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + l.Name + "/probes/" + debugdHealthProbeName),
|
||||
},
|
||||
DisableOutboundSnat: to.BoolPtr(true),
|
||||
BackendAddressPools: []*armnetwork.SubResource{
|
||||
{
|
||||
ID: to.StringPtr("/subscriptions/" + l.Subscription + "/resourceGroups/" + l.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + l.Name + "/backendAddressPools/" + backEndAddressPoolControlPlaneName),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
OutboundRules: []*armnetwork.OutboundRule{
|
||||
{
|
||||
Name: to.StringPtr("outboundRuleControlPlane"),
|
||||
Properties: &armnetwork.OutboundRulePropertiesFormat{
|
||||
FrontendIPConfigurations: []*armnetwork.SubResource{
|
||||
{
|
||||
ID: to.StringPtr("/subscriptions/" + l.Subscription + "/resourceGroups/" + l.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + l.Name + "/frontendIPConfigurations/" + frontEndIPConfigName),
|
||||
},
|
||||
},
|
||||
BackendAddressPool: &armnetwork.SubResource{
|
||||
ID: to.StringPtr("/subscriptions/" + l.Subscription + "/resourceGroups/" + l.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + l.Name + "/backendAddressPools/all"),
|
||||
},
|
||||
Protocol: armnetwork.LoadBalancerOutboundRuleProtocolAll.ToPtr(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
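A hypothetical usage sketch of the LoadBalancer type defined above, mirroring what the CLI's CreateExternalLoadBalancer does; subscriptionID, resourceGroup, location, uid, and publicIPID are assumed to be in scope. The UID-suffixed backend pool names produced here are the ones the scale sets later join.

	lb := azure.LoadBalancer{
		Name:          "constellation-load-balancer-" + uid,
		Subscription:  subscriptionID,
		ResourceGroup: resourceGroup,
		Location:      location,
		PublicIPID:    publicIPID,
		UID:           uid,
	}
	// Azure() builds the ARM payload with pools BackendAddressPoolWorkerName-<uid>,
	// BackendAddressPoolControlPlaneName-<uid>, and "all", which is then passed to
	// loadBalancersAPI.BeginCreateOrUpdate.
	armLB := lb.Azure()
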
@ -10,18 +10,22 @@ import (
|
||||
|
||||
// ScaleSet defines a Azure scale set.
|
||||
type ScaleSet struct {
|
||||
Name string
|
||||
NamePrefix string
|
||||
Location string
|
||||
InstanceType string
|
||||
StateDiskSizeGB int32
|
||||
Count int64
|
||||
Username string
|
||||
SubnetID string
|
||||
NetworkSecurityGroup string
|
||||
Password string
|
||||
Image string
|
||||
UserAssignedIdentity string
|
||||
Name string
|
||||
NamePrefix string
|
||||
Subscription string
|
||||
ResourceGroup string
|
||||
Location string
|
||||
InstanceType string
|
||||
StateDiskSizeGB int32
|
||||
Count int64
|
||||
Username string
|
||||
SubnetID string
|
||||
NetworkSecurityGroup string
|
||||
Password string
|
||||
Image string
|
||||
UserAssignedIdentity string
|
||||
LoadBalancerName string
|
||||
LoadBalancerBackendAddressPool string
|
||||
}
|
||||
|
||||
// Azure returns the Azure representation of ScaleSet.
|
||||
@ -72,13 +76,16 @@ func (s ScaleSet) Azure() armcompute.VirtualMachineScaleSet {
|
||||
{
|
||||
Name: to.StringPtr(s.Name),
|
||||
Properties: &armcompute.VirtualMachineScaleSetIPConfigurationProperties{
|
||||
Primary: to.BoolPtr(true),
|
||||
Subnet: &armcompute.APIEntityReference{
|
||||
ID: to.StringPtr(s.SubnetID),
|
||||
},
|
||||
PublicIPAddressConfiguration: &armcompute.VirtualMachineScaleSetPublicIPAddressConfiguration{
|
||||
Name: to.StringPtr(s.Name),
|
||||
Properties: &armcompute.VirtualMachineScaleSetPublicIPAddressConfigurationProperties{
|
||||
IdleTimeoutInMinutes: to.Int32Ptr(15), // default per https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-networking#creating-a-scale-set-with-public-ip-per-virtual-machine
|
||||
LoadBalancerBackendAddressPools: []*armcompute.SubResource{
|
||||
{
|
||||
ID: to.StringPtr("/subscriptions/" + s.Subscription + "/resourcegroups/" + s.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + s.LoadBalancerName + "/backendAddressPools/" + s.LoadBalancerBackendAddressPool),
|
||||
},
|
||||
{
|
||||
ID: to.StringPtr("/subscriptions/" + s.Subscription + "/resourcegroups/" + s.ResourceGroup + "/providers/Microsoft.Network/loadBalancers/" + s.LoadBalancerName + "/backendAddressPools/all"),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
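A short sketch (for illustration, reusing field names from the hunk above) of the backend pool resource ID that the scale set NIC configuration composes; it must name a pool created by LoadBalancer.Azure(), e.g. BackendAddressPoolWorkerName plus the cluster UID for the worker scale set.

	poolID := "/subscriptions/" + s.Subscription +
		"/resourcegroups/" + s.ResourceGroup +
		"/providers/Microsoft.Network/loadBalancers/" + s.LoadBalancerName +
		"/backendAddressPools/" + s.LoadBalancerBackendAddressPool
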
@ -26,6 +26,7 @@ type azureclient interface {
|
||||
GetState() (state.ConstellationState, error)
|
||||
SetState(state.ConstellationState) error
|
||||
CreateResourceGroup(ctx context.Context) error
|
||||
CreateExternalLoadBalancer(ctx context.Context) error
|
||||
CreateVirtualNetwork(ctx context.Context) error
|
||||
CreateSecurityGroup(ctx context.Context, input azurecl.NetworkSecurityGroupInput) error
|
||||
CreateInstances(ctx context.Context, input azurecl.CreateInstancesInput) error
|
||||
|
@ -24,6 +24,7 @@ type fakeAzureClient struct {
|
||||
subscriptionID string
|
||||
tenantID string
|
||||
subnetID string
|
||||
loadBalancerName string
|
||||
coordinatorsScaleSet string
|
||||
nodesScaleSet string
|
||||
networkSecurityGroup string
|
||||
@ -77,6 +78,11 @@ func (c *fakeAzureClient) CreateVirtualNetwork(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *fakeAzureClient) CreateExternalLoadBalancer(ctx context.Context) error {
|
||||
c.loadBalancerName = "loadBalancer"
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *fakeAzureClient) CreateSecurityGroup(ctx context.Context, input azurecl.NetworkSecurityGroupInput) error {
|
||||
c.networkSecurityGroup = "network-security-group"
|
||||
return nil
|
||||
@ -152,6 +158,7 @@ type stubAzureClient struct {
|
||||
createResourceGroupErr error
|
||||
createVirtualNetworkErr error
|
||||
createSecurityGroupErr error
|
||||
createLoadBalancerErr error
|
||||
createInstancesErr error
|
||||
createServicePrincipalErr error
|
||||
terminateResourceGroupErr error
|
||||
@ -166,6 +173,10 @@ func (c *stubAzureClient) SetState(state.ConstellationState) error {
|
||||
return c.setStateErr
|
||||
}
|
||||
|
||||
func (c *stubAzureClient) CreateExternalLoadBalancer(ctx context.Context) error {
|
||||
return c.createLoadBalancerErr
|
||||
}
|
||||
|
||||
func (c *stubAzureClient) CreateResourceGroup(ctx context.Context) error {
|
||||
return c.createResourceGroupErr
|
||||
}
|
||||
@ -271,11 +282,9 @@ func (c *fakeGcpClient) CreateFirewall(ctx context.Context, input gcpcl.Firewall
|
||||
if c.network == "" {
|
||||
return errors.New("client has not network")
|
||||
}
|
||||
var firewalls []string
|
||||
for _, rule := range input.Ingress {
|
||||
firewalls = append(firewalls, rule.Name)
|
||||
c.firewalls = append(c.firewalls, rule.Name)
|
||||
}
|
||||
c.firewalls = firewalls
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -82,6 +82,45 @@ func (c *Creator) createGCP(ctx context.Context, cl gcpclient, config *config.Co
|
||||
return state.ConstellationState{}, err
|
||||
}
|
||||
|
||||
// additionally create allow-internal rules
|
||||
internalFirewallInput := gcpcl.FirewallInput{
|
||||
Ingress: cloudtypes.Firewall{
|
||||
{
|
||||
Name: "allow-cluster-internal-tcp",
|
||||
Protocol: "tcp",
|
||||
IPRange: gcpcl.SubnetExtCIDR,
|
||||
},
|
||||
{
|
||||
Name: "allow-cluster-internal-udp",
|
||||
Protocol: "udp",
|
||||
IPRange: gcpcl.SubnetExtCIDR,
|
||||
},
|
||||
{
|
||||
Name: "allow-cluster-internal-icmp",
|
||||
Protocol: "icmp",
|
||||
IPRange: gcpcl.SubnetExtCIDR,
|
||||
},
|
||||
{
|
||||
Name: "allow-node-internal-tcp",
|
||||
Protocol: "tcp",
|
||||
IPRange: gcpcl.SubnetCIDR,
|
||||
},
|
||||
{
|
||||
Name: "allow-node-internal-udp",
|
||||
Protocol: "udp",
|
||||
IPRange: gcpcl.SubnetCIDR,
|
||||
},
|
||||
{
|
||||
Name: "allow-node-internal-icmp",
|
||||
Protocol: "icmp",
|
||||
IPRange: gcpcl.SubnetCIDR,
|
||||
},
|
||||
},
|
||||
}
|
||||
if err := cl.CreateFirewall(ctx, internalFirewallInput); err != nil {
|
||||
return state.ConstellationState{}, err
|
||||
}
|
||||
|
||||
createInput := client.CreateInstancesInput{
|
||||
CountCoordinators: coordCount,
|
||||
CountNodes: nodeCount,
|
||||
@ -104,6 +143,9 @@ func (c *Creator) createAzure(ctx context.Context, cl azureclient, config *confi
|
||||
if err := cl.CreateResourceGroup(ctx); err != nil {
|
||||
return state.ConstellationState{}, err
|
||||
}
|
||||
if err := cl.CreateExternalLoadBalancer(ctx); err != nil {
|
||||
return state.ConstellationState{}, err
|
||||
}
|
||||
if err := cl.CreateVirtualNetwork(ctx); err != nil {
|
||||
return state.ConstellationState{}, err
|
||||
}
|
||||
|
@ -33,7 +33,11 @@ func TestCreator(t *testing.T) {
|
||||
GCPCoordinatorInstanceTemplate: "coordinator-template",
|
||||
GCPNetwork: "network",
|
||||
GCPSubnetwork: "subnetwork",
|
||||
GCPFirewalls: []string{"coordinator", "wireguard", "ssh", "nodeport"},
|
||||
GCPFirewalls: []string{
|
||||
"coordinator", "wireguard", "ssh", "nodeport", "kubernetes",
|
||||
"allow-cluster-internal-tcp", "allow-cluster-internal-udp", "allow-cluster-internal-icmp",
|
||||
"allow-node-internal-tcp", "allow-node-internal-udp", "allow-node-internal-icmp",
|
||||
},
|
||||
}
|
||||
|
||||
wantAzureState := state.ConstellationState{
|
||||
|
@ -19,25 +19,30 @@ type Firewall config.Firewall
|
||||
func (f Firewall) GCP() ([]*computepb.Firewall, error) {
|
||||
var fw []*computepb.Firewall
|
||||
for _, rule := range f {
|
||||
var destRange []string = nil
|
||||
var srcRange []string
|
||||
if rule.IPRange != "" {
|
||||
destRange = append(destRange, rule.IPRange)
|
||||
srcRange = []string{rule.IPRange}
|
||||
}
|
||||
|
||||
ports, err := portOrRange(rule.FromPort, rule.ToPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var ports []string
|
||||
if rule.FromPort != 0 || rule.ToPort != 0 {
|
||||
port, err := portOrRange(rule.FromPort, rule.ToPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ports = []string{port}
|
||||
}
|
||||
|
||||
fw = append(fw, &computepb.Firewall{
|
||||
Allowed: []*computepb.Allowed{
|
||||
{
|
||||
IPProtocol: proto.String(rule.Protocol),
|
||||
Ports: []string{ports},
|
||||
Ports: ports,
|
||||
},
|
||||
},
|
||||
Description: proto.String(rule.Description),
|
||||
DestinationRanges: destRange,
|
||||
Name: proto.String(rule.Name),
|
||||
Description: proto.String(rule.Description),
|
||||
SourceRanges: srcRange,
|
||||
Name: proto.String(rule.Name),
|
||||
})
|
||||
}
|
||||
return fw, nil
|
||||
|
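A hedged sketch of the behavioral change in Firewall.GCP above: a rule without FromPort/ToPort no longer produces a Ports entry, and IPRange is now applied as a source range rather than a destination range. The rule values below are illustrative only.

	fw := Firewall{
		{Name: "allow-cluster-internal-icmp", Protocol: "icmp", IPRange: "10.10.0.0/16"},
	}
	rules, err := fw.GCP()
	// err == nil; rules[0].Allowed[0].Ports is empty because no ports were given,
	// and rules[0].SourceRanges == []string{"10.10.0.0/16"}.
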
@ -29,11 +29,18 @@ func TestFirewallGCP(t *testing.T) {
|
||||
IPRange: "",
|
||||
FromPort: 51820,
|
||||
},
|
||||
{
|
||||
Name: "test-3",
|
||||
Description: "This is the Test-3 Permission",
|
||||
Protocol: "tcp",
|
||||
IPRange: "192.0.2.0/24",
|
||||
FromPort: 4000,
|
||||
},
|
||||
}
|
||||
|
||||
firewalls, err := testFw.GCP()
|
||||
assert.NoError(err)
|
||||
assert.Equal(2, len(firewalls))
|
||||
assert.Equal(len(testFw), len(firewalls))
|
||||
|
||||
// Check permissions
|
||||
for i := 0; i < len(testFw); i++ {
|
||||
@ -47,6 +54,11 @@ func TestFirewallGCP(t *testing.T) {
|
||||
|
||||
assert.Equal(testFw[i].Name, firewall1.GetName())
|
||||
assert.Equal(testFw[i].Description, firewall1.GetDescription())
|
||||
|
||||
if testFw[i].IPRange != "" {
|
||||
require.Len(firewall1.GetSourceRanges(), 1)
|
||||
assert.Equal(testFw[i].IPRange, firewall1.GetSourceRanges()[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -357,6 +357,9 @@ func readOrGenerateVPNKey(fileHandler file.Handler, privKeyPath string) (privKey
func ipsToEndpoints(ips []string, port string) []string {
	var endpoints []string
	for _, ip := range ips {
		if ip == "" {
			continue
		}
		endpoints = append(endpoints, net.JoinHostPort(ip, port))
	}
	return endpoints

@ -355,7 +355,7 @@ func TestWriteOutput(t *testing.T) {
|
||||
func TestIpsToEndpoints(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
ips := []string{"192.0.2.1", "192.0.2.2", "192.0.2.3"}
|
||||
ips := []string{"192.0.2.1", "192.0.2.2", "", "192.0.2.3"}
|
||||
port := "8080"
|
||||
endpoints := ipsToEndpoints(ips, port)
|
||||
assert.Equal([]string{"192.0.2.1:8080", "192.0.2.2:8080", "192.0.2.3:8080"}, endpoints)
|
||||
|
@ -48,18 +48,19 @@ func (c *Client) CreateInstances(ctx context.Context, input CreateInstancesInput
|
||||
c.nodeTemplate = nodeTemplateInput.Name
|
||||
|
||||
coordinatorTemplateInput := insertInstanceTemplateInput{
|
||||
Name: c.name + "-control-plane-" + c.uid,
|
||||
Network: c.network,
|
||||
Subnetwork: c.subnetwork,
|
||||
ImageId: input.ImageId,
|
||||
InstanceType: input.InstanceType,
|
||||
StateDiskSizeGB: int64(input.StateDiskSizeGB),
|
||||
Role: role.Coordinator.String(),
|
||||
KubeEnv: input.KubeEnv,
|
||||
Project: c.project,
|
||||
Zone: c.zone,
|
||||
Region: c.region,
|
||||
UID: c.uid,
|
||||
Name: c.name + "-control-plane-" + c.uid,
|
||||
Network: c.network,
|
||||
Subnetwork: c.subnetwork,
|
||||
SecondarySubnetworkRangeName: c.secondarySubnetworkRange,
|
||||
ImageId: input.ImageId,
|
||||
InstanceType: input.InstanceType,
|
||||
StateDiskSizeGB: int64(input.StateDiskSizeGB),
|
||||
Role: role.Coordinator.String(),
|
||||
KubeEnv: input.KubeEnv,
|
||||
Project: c.project,
|
||||
Zone: c.zone,
|
||||
Region: c.region,
|
||||
UID: c.uid,
|
||||
}
|
||||
op, err = c.insertInstanceTemplate(ctx, coordinatorTemplateInput)
|
||||
if err != nil {
|
||||
@ -72,6 +73,21 @@ func (c *Client) CreateInstances(ctx context.Context, input CreateInstancesInput
|
||||
}
|
||||
ops = []Operation{}
|
||||
|
||||
coordinatorGroupInput := instanceGroupManagerInput{
|
||||
Count: input.CountCoordinators,
|
||||
Name: strings.Join([]string{c.name, "control-plane", c.uid}, "-"),
|
||||
Template: c.coordinatorTemplate,
|
||||
UID: c.uid,
|
||||
Project: c.project,
|
||||
Zone: c.zone,
|
||||
}
|
||||
op, err = c.insertInstanceGroupManger(ctx, coordinatorGroupInput)
|
||||
if err != nil {
|
||||
return fmt.Errorf("inserting instanceGroupManager failed: %w", err)
|
||||
}
|
||||
ops = append(ops, op)
|
||||
c.coordinatorInstanceGroup = coordinatorGroupInput.Name
|
||||
|
||||
nodeGroupInput := instanceGroupManagerInput{
|
||||
Count: input.CountNodes,
|
||||
Name: strings.Join([]string{c.name, "worker", c.uid}, "-"),
|
||||
@ -87,20 +103,6 @@ func (c *Client) CreateInstances(ctx context.Context, input CreateInstancesInput
|
||||
ops = append(ops, op)
|
||||
c.nodesInstanceGroup = nodeGroupInput.Name
|
||||
|
||||
coordinatorGroupInput := instanceGroupManagerInput{
|
||||
Count: input.CountCoordinators,
|
||||
Name: strings.Join([]string{c.name, "control-plane", c.uid}, "-"),
|
||||
Template: c.coordinatorTemplate,
|
||||
UID: c.uid,
|
||||
Project: c.project,
|
||||
Zone: c.zone,
|
||||
}
|
||||
op, err = c.insertInstanceGroupManger(ctx, coordinatorGroupInput)
|
||||
if err != nil {
|
||||
return fmt.Errorf("inserting instanceGroupManager failed: %w", err)
|
||||
}
|
||||
ops = append(ops, op)
|
||||
c.coordinatorInstanceGroup = coordinatorGroupInput.Name
|
||||
if err := c.waitForOperations(ctx, ops); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -390,7 +392,7 @@ func (i insertInstanceTemplateInput) insertInstanceTemplateRequest() *computepb.
|
||||
EnableVtpm: proto.Bool(true),
|
||||
},
|
||||
Tags: &computepb.Tags{
|
||||
Items: []string{"constellation"},
|
||||
Items: []string{"constellation-" + i.UID},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -9,6 +9,11 @@ import (
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
const (
	SubnetCIDR    = "192.168.178.0/24"
	SubnetExtCIDR = "10.10.0.0/16"
)
|
||||
|
||||
// CreateFirewall creates a set of firewall rules for the client's network.
|
||||
//
|
||||
// The client must have a VPC network to set firewall rules.
|
||||
@ -163,13 +168,13 @@ func (c *Client) createSubnet(ctx context.Context, name, network, secondaryRange
|
||||
Project: c.project,
|
||||
Region: c.region,
|
||||
SubnetworkResource: &computepb.Subnetwork{
|
||||
IpCidrRange: proto.String("192.168.178.0/24"),
|
||||
IpCidrRange: proto.String(SubnetCIDR),
|
||||
Name: proto.String(name),
|
||||
Network: proto.String("projects/" + c.project + "/global/networks/" + network),
|
||||
SecondaryIpRanges: []*computepb.SubnetworkSecondaryRange{
|
||||
{
|
||||
RangeName: proto.String(secondaryRangeName),
|
||||
IpCidrRange: proto.String("10.10.0.0/16"),
|
||||
IpCidrRange: proto.String(SubnetExtCIDR),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -1,7 +1,7 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
|
||||
k8s "k8s.io/api/core/v1"
|
||||
)
|
||||
@ -15,7 +15,7 @@ func (a Autoscaler) Name() string {
|
||||
}
|
||||
|
||||
// Secrets returns a list of secrets to deploy together with the k8s cluster-autoscaler.
|
||||
func (a Autoscaler) Secrets(instance core.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
|
||||
func (a Autoscaler) Secrets(instance cloudtypes.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
|
||||
return resources.Secrets{}, nil
|
||||
}
|
||||
|
||||
|
@ -1,8 +1,10 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider"
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
|
||||
k8s "k8s.io/api/core/v1"
|
||||
)
|
||||
@ -32,13 +34,13 @@ func (c CloudControllerManager) ExtraArgs() []string {
|
||||
|
||||
// ConfigMaps returns a list of ConfigMaps to deploy together with the k8s cloud-controller-manager
|
||||
// Reference: https://kubernetes.io/docs/concepts/configuration/configmap/ .
|
||||
func (c CloudControllerManager) ConfigMaps(instance core.Instance) (resources.ConfigMaps, error) {
|
||||
func (c CloudControllerManager) ConfigMaps(instance cloudtypes.Instance) (resources.ConfigMaps, error) {
|
||||
return resources.ConfigMaps{}, nil
|
||||
}
|
||||
|
||||
// Secrets returns a list of secrets to deploy together with the k8s cloud-controller-manager.
|
||||
// Reference: https://kubernetes.io/docs/concepts/configuration/secret/ .
|
||||
func (c CloudControllerManager) Secrets(instance core.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
|
||||
func (c CloudControllerManager) Secrets(ctx context.Context, instance cloudtypes.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
|
||||
return resources.Secrets{}, nil
|
||||
}
|
||||
|
||||
@ -58,13 +60,6 @@ func (c CloudControllerManager) Env() []k8s.EnvVar {
|
||||
return []k8s.EnvVar{}
|
||||
}
|
||||
|
||||
// PrepareInstance is called on every instance before deploying the cloud-controller-manager.
|
||||
// Allows for cloud-provider specific hooks.
|
||||
func (c CloudControllerManager) PrepareInstance(instance core.Instance, vpnIP string) error {
|
||||
// no specific hook required.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Supported is used to determine if cloud controller manager is implemented for this cloud provider.
|
||||
func (c CloudControllerManager) Supported() bool {
|
||||
return false
|
||||
|
@ -6,7 +6,7 @@ import (
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
)
|
||||
|
||||
@ -14,30 +14,30 @@ import (
|
||||
type Metadata struct{}
|
||||
|
||||
// List retrieves all instances belonging to the current constellation.
|
||||
func (m Metadata) List(ctx context.Context) ([]core.Instance, error) {
|
||||
func (m Metadata) List(ctx context.Context) ([]cloudtypes.Instance, error) {
|
||||
// TODO: implement using https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ec2#Client.DescribeInstances
|
||||
// And using AWS ec2 instance tags
|
||||
panic("function *Metadata.List not implemented")
|
||||
}
|
||||
|
||||
// Self retrieves the current instance.
|
||||
func (m Metadata) Self(ctx context.Context) (core.Instance, error) {
|
||||
func (m Metadata) Self(ctx context.Context) (cloudtypes.Instance, error) {
|
||||
identityDocument, err := retrieveIdentityDocument(ctx)
|
||||
if err != nil {
|
||||
return core.Instance{}, err
|
||||
return cloudtypes.Instance{}, err
|
||||
}
|
||||
// TODO: implement metadata using AWS ec2 instance tags
|
||||
return core.Instance{
|
||||
return cloudtypes.Instance{
|
||||
Name: identityDocument.InstanceID,
|
||||
ProviderID: providerID(identityDocument),
|
||||
IPs: []string{
|
||||
PrivateIPs: []string{
|
||||
identityDocument.PrivateIP,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetInstance retrieves an instance using its providerID.
|
||||
func (m Metadata) GetInstance(ctx context.Context, providerID string) (core.Instance, error) {
|
||||
func (m Metadata) GetInstance(ctx context.Context, providerID string) (cloudtypes.Instance, error) {
|
||||
// TODO: implement using https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ec2#DescribeInstancesAPIClient.DescribeInstances
|
||||
// And using AWS ec2 instance tags
|
||||
// Filter request to only return info on this instance
|
||||
|
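The switch from core.Instance to cloudtypes.Instance in the hunk above also renames the address field (IPs becomes PrivateIPs). A hypothetical consumer that previously read instance.IPs would now do something like:

	var privateIP string
	if len(instance.PrivateIPs) > 0 {
		privateIP = instance.PrivateIPs[0]
	}
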
@ -12,6 +12,24 @@ type imdsAPI interface {
|
||||
Retrieve(ctx context.Context) (metadataResponse, error)
|
||||
}
|
||||
|
||||
type virtualNetworksClientListPager interface {
|
||||
NextPage(ctx context.Context) bool
|
||||
PageResponse() armnetwork.VirtualNetworksClientListResponse
|
||||
}
|
||||
|
||||
type virtualNetworksAPI interface {
|
||||
List(resourceGroupName string, options *armnetwork.VirtualNetworksClientListOptions) virtualNetworksClientListPager
|
||||
}
|
||||
|
||||
type securityGroupsClientListPager interface {
|
||||
NextPage(ctx context.Context) bool
|
||||
PageResponse() armnetwork.SecurityGroupsClientListResponse
|
||||
}
|
||||
|
||||
type securityGroupsAPI interface {
|
||||
List(resourceGroupName string, options *armnetwork.SecurityGroupsClientListOptions) securityGroupsClientListPager
|
||||
}
|
||||
|
||||
type networkInterfacesAPI interface {
|
||||
GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string,
|
||||
virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string,
|
||||
@ -21,6 +39,16 @@ type networkInterfacesAPI interface {
|
||||
options *armnetwork.InterfacesClientGetOptions) (armnetwork.InterfacesClientGetResponse, error)
|
||||
}
|
||||
|
||||
type publicIPAddressesAPI interface {
|
||||
GetVirtualMachineScaleSetPublicIPAddress(ctx context.Context, resourceGroupName string,
|
||||
virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string,
|
||||
ipConfigurationName string, publicIPAddressName string,
|
||||
options *armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressOptions,
|
||||
) (armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResponse, error)
|
||||
Get(ctx context.Context, resourceGroupName string, publicIPAddressName string,
|
||||
options *armnetwork.PublicIPAddressesClientGetOptions) (armnetwork.PublicIPAddressesClientGetResponse, error)
|
||||
}
|
||||
|
||||
type virtualMachinesAPI interface {
|
||||
Get(ctx context.Context, resourceGroupName string, vmName string, options *armcompute.VirtualMachinesClientGetOptions) (armcompute.VirtualMachinesClientGetResponse, error)
|
||||
List(resourceGroupName string, options *armcompute.VirtualMachinesClientListOptions) virtualMachinesClientListPager
|
||||
@ -45,6 +73,15 @@ type scaleSetsAPI interface {
|
||||
List(resourceGroupName string, options *armcompute.VirtualMachineScaleSetsClientListOptions) virtualMachineScaleSetsClientListPager
|
||||
}
|
||||
|
||||
type loadBalancersClientListPager interface {
|
||||
NextPage(ctx context.Context) bool
|
||||
PageResponse() armnetwork.LoadBalancersClientListResponse
|
||||
}
|
||||
|
||||
type loadBalancerAPI interface {
|
||||
List(resourceGroupName string, options *armnetwork.LoadBalancersClientListOptions) loadBalancersClientListPager
|
||||
}
|
||||
|
||||
type virtualMachineScaleSetsClientListPager interface {
|
||||
NextPage(ctx context.Context) bool
|
||||
PageResponse() armcompute.VirtualMachineScaleSetsClientListResponse
|
||||
|
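A hypothetical sketch of how the new loadBalancerAPI pager above would typically be drained; the nested response path matches the stub pages constructed further down in this diff.

	func listLoadBalancers(ctx context.Context, api loadBalancerAPI, resourceGroup string) []*armnetwork.LoadBalancer {
		var lbs []*armnetwork.LoadBalancer
		pager := api.List(resourceGroup, nil)
		for pager.NextPage(ctx) {
			// Collect the load balancers of the current page before advancing.
			page := pager.PageResponse()
			lbs = append(lbs, page.LoadBalancersClientListResult.LoadBalancerListResult.Value...)
		}
		return lbs
	}
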
@ -174,3 +174,121 @@ func (a *stubTagsAPI) CreateOrUpdateAtScope(ctx context.Context, scope string, p
|
||||
func (a *stubTagsAPI) UpdateAtScope(ctx context.Context, scope string, parameters armresources.TagsPatchResource, options *armresources.TagsClientUpdateAtScopeOptions) (armresources.TagsClientUpdateAtScopeResponse, error) {
|
||||
return armresources.TagsClientUpdateAtScopeResponse{}, a.updateAtScopeErr
|
||||
}
|
||||
|
||||
type stubSecurityGroupsClientListPager struct {
|
||||
pagesCounter int
|
||||
pages [][]*armnetwork.SecurityGroup
|
||||
}
|
||||
|
||||
func (p *stubSecurityGroupsClientListPager) NextPage(ctx context.Context) bool {
|
||||
return p.pagesCounter < len(p.pages)
|
||||
}
|
||||
|
||||
func (p *stubSecurityGroupsClientListPager) PageResponse() armnetwork.SecurityGroupsClientListResponse {
|
||||
if p.pagesCounter >= len(p.pages) {
|
||||
return armnetwork.SecurityGroupsClientListResponse{}
|
||||
}
|
||||
p.pagesCounter = p.pagesCounter + 1
|
||||
return armnetwork.SecurityGroupsClientListResponse{
|
||||
SecurityGroupsClientListResult: armnetwork.SecurityGroupsClientListResult{
|
||||
SecurityGroupListResult: armnetwork.SecurityGroupListResult{
|
||||
Value: p.pages[p.pagesCounter-1],
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type stubSecurityGroupsAPI struct {
|
||||
listPages [][]*armnetwork.SecurityGroup
|
||||
}
|
||||
|
||||
func (a *stubSecurityGroupsAPI) List(resourceGroupName string, options *armnetwork.SecurityGroupsClientListOptions) securityGroupsClientListPager {
|
||||
return &stubSecurityGroupsClientListPager{
|
||||
pages: a.listPages,
|
||||
}
|
||||
}
|
||||
|
||||
type stubVirtualNetworksClientListPager struct {
|
||||
pagesCounter int
|
||||
pages [][]*armnetwork.VirtualNetwork
|
||||
}
|
||||
|
||||
func (p *stubVirtualNetworksClientListPager) NextPage(ctx context.Context) bool {
|
||||
return p.pagesCounter < len(p.pages)
|
||||
}
|
||||
|
||||
func (p *stubVirtualNetworksClientListPager) PageResponse() armnetwork.VirtualNetworksClientListResponse {
|
||||
if p.pagesCounter >= len(p.pages) {
|
||||
return armnetwork.VirtualNetworksClientListResponse{}
|
||||
}
|
||||
p.pagesCounter = p.pagesCounter + 1
|
||||
return armnetwork.VirtualNetworksClientListResponse{
|
||||
VirtualNetworksClientListResult: armnetwork.VirtualNetworksClientListResult{
|
||||
VirtualNetworkListResult: armnetwork.VirtualNetworkListResult{
|
||||
Value: p.pages[p.pagesCounter-1],
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type stubVirtualNetworksAPI struct {
|
||||
listPages [][]*armnetwork.VirtualNetwork
|
||||
}
|
||||
|
||||
func (a *stubVirtualNetworksAPI) List(resourceGroupName string, options *armnetwork.VirtualNetworksClientListOptions) virtualNetworksClientListPager {
|
||||
return &stubVirtualNetworksClientListPager{
|
||||
pages: a.listPages,
|
||||
}
|
||||
}
|
||||
|
||||
type stubLoadBalancersClientListPager struct {
|
||||
pagesCounter int
|
||||
pages [][]*armnetwork.LoadBalancer
|
||||
}
|
||||
|
||||
func (p *stubLoadBalancersClientListPager) NextPage(ctx context.Context) bool {
|
||||
return p.pagesCounter < len(p.pages)
|
||||
}
|
||||
|
||||
func (p *stubLoadBalancersClientListPager) PageResponse() armnetwork.LoadBalancersClientListResponse {
|
||||
if p.pagesCounter >= len(p.pages) {
|
||||
return armnetwork.LoadBalancersClientListResponse{}
|
||||
}
|
||||
p.pagesCounter = p.pagesCounter + 1
|
||||
return armnetwork.LoadBalancersClientListResponse{
|
||||
LoadBalancersClientListResult: armnetwork.LoadBalancersClientListResult{
|
||||
LoadBalancerListResult: armnetwork.LoadBalancerListResult{
|
||||
Value: p.pages[p.pagesCounter-1],
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type stubLoadBalancersAPI struct {
|
||||
listPages [][]*armnetwork.LoadBalancer
|
||||
}
|
||||
|
||||
func (a *stubLoadBalancersAPI) List(resourceGroupName string, options *armnetwork.LoadBalancersClientListOptions) loadBalancersClientListPager {
|
||||
return &stubLoadBalancersClientListPager{
|
||||
pages: a.listPages,
|
||||
}
|
||||
}
|
||||
|
||||
type stubPublicIPAddressesAPI struct {
	getResponse                                      armnetwork.PublicIPAddressesClientGetResponse
	getVirtualMachineScaleSetPublicIPAddressResponse armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResponse
	getErr                                           error
}

func (a *stubPublicIPAddressesAPI) Get(ctx context.Context, resourceGroupName string, publicIPAddressName string,
	options *armnetwork.PublicIPAddressesClientGetOptions,
) (armnetwork.PublicIPAddressesClientGetResponse, error) {
	return a.getResponse, a.getErr
}

func (a *stubPublicIPAddressesAPI) GetVirtualMachineScaleSetPublicIPAddress(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string,
	virtualmachineIndex string, networkInterfaceName string, IPConfigurationName string, publicIPAddressName string,
	options *armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressOptions,
) (armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResponse, error) {
	return a.getVirtualMachineScaleSetPublicIPAddressResponse, a.getErr
}

@ -1,7 +1,7 @@
package azure
|
||||
|
||||
import (
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
|
||||
k8s "k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@ -16,7 +16,7 @@ func (a *Autoscaler) Name() string {
|
||||
}
|
||||
|
||||
// Secrets returns a list of secrets to deploy together with the k8s cluster-autoscaler.
|
||||
func (a *Autoscaler) Secrets(instance core.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
|
||||
func (a *Autoscaler) Secrets(instance cloudtypes.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
|
||||
subscriptionID, resourceGroup, err := extractBasicsFromProviderID(instance.ProviderID)
|
||||
if err != nil {
|
||||
return resources.Secrets{}, err
|
||||
|
@ -3,7 +3,7 @@ package azure
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@ -13,13 +13,13 @@ import (
|
||||
|
||||
func TestAutoscalerSecrets(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
instance core.Instance
|
||||
instance cloudtypes.Instance
|
||||
cloudServiceAccountURI string
|
||||
wantSecrets resources.Secrets
|
||||
wantErr bool
|
||||
}{
|
||||
"Secrets works": {
|
||||
instance: core.Instance{ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name"},
|
||||
instance: cloudtypes.Instance{ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name"},
|
||||
cloudServiceAccountURI: "serviceaccount://azure?tenant_id=tenant-id&client_id=client-id&client_secret=client-secret",
|
||||
wantSecrets: resources.Secrets{
|
||||
&k8s.Secret{
|
||||
@ -43,11 +43,11 @@ func TestAutoscalerSecrets(t *testing.T) {
|
||||
},
|
||||
},
|
||||
"invalid providerID fails": {
|
||||
instance: core.Instance{ProviderID: "invalid"},
|
||||
instance: cloudtypes.Instance{ProviderID: "invalid"},
|
||||
wantErr: true,
|
||||
},
|
||||
"invalid cloudServiceAccountURI fails": {
|
||||
instance: core.Instance{ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name"},
|
||||
instance: cloudtypes.Instance{ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name"},
|
||||
cloudServiceAccountURI: "invalid",
|
||||
wantErr: true,
|
||||
},
|
||||
|
@ -1,17 +1,31 @@
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider"
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
|
||||
k8s "k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type ccmMetadata interface {
	GetNetworkSecurityGroupName(ctx context.Context) (string, error)
	GetLoadBalancerName(ctx context.Context) (string, error)
}

// CloudControllerManager holds the Azure cloud-controller-manager configuration.
type CloudControllerManager struct{}
type CloudControllerManager struct {
	metadata ccmMetadata
}

func NewCloudControllerManager(metadata ccmMetadata) *CloudControllerManager {
	return &CloudControllerManager{
		metadata: metadata,
	}
}

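The constructor accepts anything that satisfies ccmMetadata; later in this commit the azure Metadata type gains both methods. A hedged wiring sketch, assuming CloudControllerManager and Metadata live in the same azure package as their shared package clause suggests (newAzureCCM is a hypothetical helper, not part of the commit):

// newAzureCCM shows how the pieces could be wired together: NewMetadata builds
// the Azure metadata client, which satisfies ccmMetadata.
func newAzureCCM(ctx context.Context) (*CloudControllerManager, error) {
	metadata, err := NewMetadata(ctx)
	if err != nil {
		return nil, err
	}
	return NewCloudControllerManager(metadata), nil
}
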
// Image returns the container image used to provide cloud-controller-manager for the cloud-provider.
|
||||
func (c *CloudControllerManager) Image() string {
|
||||
@ -33,18 +47,20 @@ func (c *CloudControllerManager) ExtraArgs() []string {
|
||||
return []string{
|
||||
"--controllers=*,-cloud-node",
|
||||
"--cloud-config=/etc/azure/azure.json",
|
||||
"--allocate-node-cidrs=false",
|
||||
"--configure-cloud-routes=true",
|
||||
}
|
||||
}
|
||||
|
||||
// ConfigMaps returns a list of ConfigMaps to deploy together with the k8s cloud-controller-manager
|
||||
// Reference: https://kubernetes.io/docs/concepts/configuration/configmap/ .
|
||||
func (c *CloudControllerManager) ConfigMaps(instance core.Instance) (resources.ConfigMaps, error) {
|
||||
func (c *CloudControllerManager) ConfigMaps(instance cloudtypes.Instance) (resources.ConfigMaps, error) {
|
||||
return resources.ConfigMaps{}, nil
|
||||
}
|
||||
|
||||
// Secrets returns a list of secrets to deploy together with the k8s cloud-controller-manager.
|
||||
// Reference: https://kubernetes.io/docs/concepts/configuration/secret/ .
|
||||
func (c *CloudControllerManager) Secrets(instance core.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
|
||||
func (c *CloudControllerManager) Secrets(ctx context.Context, instance cloudtypes.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
|
||||
// Azure CCM expects cloud provider config to contain cluster configuration and service principal client secrets
|
||||
// reference: https://kubernetes-sigs.github.io/cloud-provider-azure/install/configs/
|
||||
|
||||
@ -62,11 +78,24 @@ func (c *CloudControllerManager) Secrets(instance core.Instance, cloudServiceAcc
|
||||
vmType = "vmss"
|
||||
}
|
||||
|
||||
securityGroupName, err := c.metadata.GetNetworkSecurityGroupName(ctx)
|
||||
if err != nil {
|
||||
return resources.Secrets{}, err
|
||||
}
|
||||
|
||||
loadBalancerName, err := c.metadata.GetLoadBalancerName(ctx)
|
||||
if err != nil {
|
||||
return resources.Secrets{}, err
|
||||
}
|
||||
|
||||
config := cloudConfig{
|
||||
Cloud: "AzurePublicCloud",
|
||||
TenantID: creds.TenantID,
|
||||
SubscriptionID: subscriptionID,
|
||||
ResourceGroup: resourceGroup,
|
||||
LoadBalancerSku: "standard",
|
||||
SecurityGroupName: securityGroupName,
|
||||
LoadBalancerName: loadBalancerName,
|
||||
UseInstanceMetadata: true,
|
||||
VmType: vmType,
|
||||
Location: creds.Location,
|
||||
@ -127,13 +156,6 @@ func (c *CloudControllerManager) Env() []k8s.EnvVar {
|
||||
return []k8s.EnvVar{}
|
||||
}
|
||||
|
||||
// PrepareInstance is called on every instance before deploying the cloud-controller-manager.
|
||||
// Allows for cloud-provider specific hooks.
|
||||
func (c *CloudControllerManager) PrepareInstance(instance core.Instance, vpnIP string) error {
|
||||
// no specific hook required.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Supported is used to determine if cloud controller manager is implemented for this cloud provider.
|
||||
func (c *CloudControllerManager) Supported() bool {
|
||||
return true
|
||||
@ -148,6 +170,8 @@ type cloudConfig struct {
|
||||
SubnetName string `json:"subnetName,omitempty"`
|
||||
SecurityGroupName string `json:"securityGroupName,omitempty"`
|
||||
SecurityGroupResourceGroup string `json:"securityGroupResourceGroup,omitempty"`
|
||||
LoadBalancerName string `json:"loadBalancerName,omitempty"`
|
||||
LoadBalancerSku string `json:"loadBalancerSku,omitempty"`
|
||||
VNetName string `json:"vnetName,omitempty"`
|
||||
VNetResourceGroup string `json:"vnetResourceGroup,omitempty"`
|
||||
CloudProviderBackoff bool `json:"cloudProviderBackoff,omitempty"`
|
||||
|
@ -1,9 +1,11 @@
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@ -12,15 +14,18 @@ import (
|
||||
)
|
||||
|
||||
func TestSecrets(t *testing.T) {
|
||||
someErr := errors.New("some error")
|
||||
testCases := map[string]struct {
|
||||
instance core.Instance
|
||||
instance cloudtypes.Instance
|
||||
metadata ccmMetadata
|
||||
cloudServiceAccountURI string
|
||||
wantSecrets resources.Secrets
|
||||
wantErr bool
|
||||
}{
|
||||
"Secrets works": {
|
||||
instance: core.Instance{ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name"},
|
||||
instance: cloudtypes.Instance{ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name"},
|
||||
cloudServiceAccountURI: "serviceaccount://azure?tenant_id=tenant-id&client_id=client-id&client_secret=client-secret&location=location",
|
||||
metadata: &ccmMetadataStub{loadBalancerName: "load-balancer-name", networkSecurityGroupName: "network-security-group-name"},
|
||||
wantSecrets: resources.Secrets{
|
||||
&k8s.Secret{
|
||||
TypeMeta: meta.TypeMeta{
|
||||
@ -32,14 +37,15 @@ func TestSecrets(t *testing.T) {
|
||||
Namespace: "kube-system",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"azure.json": []byte(`{"cloud":"AzurePublicCloud","tenantId":"tenant-id","subscriptionId":"subscription-id","resourceGroup":"resource-group","location":"location","useInstanceMetadata":true,"vmType":"standard","aadClientId":"client-id","aadClientSecret":"client-secret"}`),
|
||||
"azure.json": []byte(`{"cloud":"AzurePublicCloud","tenantId":"tenant-id","subscriptionId":"subscription-id","resourceGroup":"resource-group","location":"location","securityGroupName":"network-security-group-name","loadBalancerName":"load-balancer-name","loadBalancerSku":"standard","useInstanceMetadata":true,"vmType":"standard","aadClientId":"client-id","aadClientSecret":"client-secret"}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"Secrets works for scale sets": {
|
||||
instance: core.Instance{ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id"},
|
||||
instance: cloudtypes.Instance{ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id"},
|
||||
cloudServiceAccountURI: "serviceaccount://azure?tenant_id=tenant-id&client_id=client-id&client_secret=client-secret&location=location",
|
||||
metadata: &ccmMetadataStub{loadBalancerName: "load-balancer-name", networkSecurityGroupName: "network-security-group-name"},
|
||||
wantSecrets: resources.Secrets{
|
||||
&k8s.Secret{
|
||||
TypeMeta: meta.TypeMeta{
|
||||
@ -51,17 +57,31 @@ func TestSecrets(t *testing.T) {
|
||||
Namespace: "kube-system",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"azure.json": []byte(`{"cloud":"AzurePublicCloud","tenantId":"tenant-id","subscriptionId":"subscription-id","resourceGroup":"resource-group","location":"location","useInstanceMetadata":true,"vmType":"vmss","aadClientId":"client-id","aadClientSecret":"client-secret"}`),
|
||||
"azure.json": []byte(`{"cloud":"AzurePublicCloud","tenantId":"tenant-id","subscriptionId":"subscription-id","resourceGroup":"resource-group","location":"location","securityGroupName":"network-security-group-name","loadBalancerName":"load-balancer-name","loadBalancerSku":"standard","useInstanceMetadata":true,"vmType":"vmss","aadClientId":"client-id","aadClientSecret":"client-secret"}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"cannot get load balancer Name": {
|
||||
instance: cloudtypes.Instance{ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id"},
|
||||
cloudServiceAccountURI: "serviceaccount://azure?tenant_id=tenant-id&client_id=client-id&client_secret=client-secret&location=location",
|
||||
metadata: &ccmMetadataStub{getLoadBalancerNameErr: someErr},
|
||||
wantErr: true,
|
||||
},
|
||||
"cannot get network security group name": {
|
||||
instance: cloudtypes.Instance{ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id"},
|
||||
cloudServiceAccountURI: "serviceaccount://azure?tenant_id=tenant-id&client_id=client-id&client_secret=client-secret&location=location",
|
||||
metadata: &ccmMetadataStub{getNetworkSecurityGroupNameErr: someErr},
|
||||
wantErr: true,
|
||||
},
|
||||
"invalid providerID fails": {
|
||||
instance: core.Instance{ProviderID: "invalid"},
|
||||
instance: cloudtypes.Instance{ProviderID: "invalid"},
|
||||
metadata: &ccmMetadataStub{},
|
||||
wantErr: true,
|
||||
},
|
||||
"invalid cloudServiceAccountURI fails": {
|
||||
instance: core.Instance{ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name"},
|
||||
instance: cloudtypes.Instance{ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name"},
|
||||
metadata: &ccmMetadataStub{},
|
||||
cloudServiceAccountURI: "invalid",
|
||||
wantErr: true,
|
||||
},
|
||||
@ -72,8 +92,8 @@ func TestSecrets(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
cloud := CloudControllerManager{}
|
||||
secrets, err := cloud.Secrets(tc.instance, tc.cloudServiceAccountURI)
|
||||
cloud := NewCloudControllerManager(tc.metadata)
|
||||
secrets, err := cloud.Secrets(context.Background(), tc.instance, tc.cloudServiceAccountURI)
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
@ -92,10 +112,25 @@ func TestTrivialCCMFunctions(t *testing.T) {
|
||||
assert.NotEmpty(cloud.Path())
|
||||
assert.NotEmpty(cloud.Name())
|
||||
assert.NotEmpty(cloud.ExtraArgs())
|
||||
assert.Empty(cloud.ConfigMaps(core.Instance{}))
|
||||
assert.Empty(cloud.ConfigMaps(cloudtypes.Instance{}))
|
||||
assert.NotEmpty(cloud.Volumes())
|
||||
assert.NotEmpty(cloud.VolumeMounts())
|
||||
assert.Empty(cloud.Env())
|
||||
assert.NoError(cloud.PrepareInstance(core.Instance{}, "192.0.2.0"))
|
||||
assert.True(cloud.Supported())
|
||||
}
|
||||
|
||||
type ccmMetadataStub struct {
|
||||
networkSecurityGroupName string
|
||||
loadBalancerName string
|
||||
|
||||
getNetworkSecurityGroupNameErr error
|
||||
getLoadBalancerNameErr error
|
||||
}
|
||||
|
||||
func (c *ccmMetadataStub) GetNetworkSecurityGroupName(ctx context.Context) (string, error) {
|
||||
return c.networkSecurityGroupName, c.getNetworkSecurityGroupNameErr
|
||||
}
|
||||
|
||||
func (c *ccmMetadataStub) GetLoadBalancerName(ctx context.Context) (string, error) {
|
||||
return c.loadBalancerName, c.getLoadBalancerNameErr
|
||||
}
|
||||
|
@ -19,7 +19,7 @@ func (c *CloudNodeManager) Path() string {
|
||||
// ExtraArgs returns a list of arguments to append to the cloud-node-manager command.
|
||||
func (c *CloudNodeManager) ExtraArgs() []string {
|
||||
return []string{
|
||||
"--wait-routes=false",
|
||||
"--wait-routes=true",
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -10,15 +10,25 @@ import (
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
)
|
||||
|
||||
var (
|
||||
publicIPAddressRegexp = regexp.MustCompile(`/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft.Network/publicIPAddresses/(?P<IPname>[^/]+)`)
|
||||
keyPathRegexp = regexp.MustCompile(`^\/home\/([^\/]+)\/\.ssh\/authorized_keys$`)
|
||||
)
|
||||
|
||||
// Metadata implements azure metadata APIs.
|
||||
type Metadata struct {
|
||||
imdsAPI
|
||||
virtualNetworksAPI
|
||||
securityGroupsAPI
|
||||
networkInterfacesAPI
|
||||
publicIPAddressesAPI
|
||||
scaleSetsAPI
|
||||
loadBalancerAPI
|
||||
virtualMachinesAPI
|
||||
virtualMachineScaleSetVMsAPI
|
||||
tagsAPI
|
||||
@ -44,15 +54,23 @@ func NewMetadata(ctx context.Context) (*Metadata, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
virtualNetworksAPI := armnetwork.NewVirtualNetworksClient(subscriptionID, cred, nil)
|
||||
networkInterfacesAPI := armnetwork.NewInterfacesClient(subscriptionID, cred, nil)
|
||||
publicIPAddressesAPI := armnetwork.NewPublicIPAddressesClient(subscriptionID, cred, nil)
|
||||
securityGroupsAPI := armnetwork.NewSecurityGroupsClient(subscriptionID, cred, nil)
|
||||
scaleSetsAPI := armcompute.NewVirtualMachineScaleSetsClient(subscriptionID, cred, nil)
|
||||
loadBalancerAPI := armnetwork.NewLoadBalancersClient(subscriptionID, cred, nil)
|
||||
virtualMachinesAPI := armcompute.NewVirtualMachinesClient(subscriptionID, cred, nil)
|
||||
virtualMachineScaleSetVMsAPI := armcompute.NewVirtualMachineScaleSetVMsClient(subscriptionID, cred, nil)
|
||||
tagsAPI := armresources.NewTagsClient(subscriptionID, cred, nil)
|
||||
|
||||
return &Metadata{
|
||||
imdsAPI: &imdsAPI,
|
||||
virtualNetworksAPI: &virtualNetworksClient{virtualNetworksAPI},
|
||||
networkInterfacesAPI: &networkInterfacesClient{networkInterfacesAPI},
|
||||
securityGroupsAPI: &securityGroupsClient{securityGroupsAPI},
|
||||
publicIPAddressesAPI: &publicIPAddressesClient{publicIPAddressesAPI},
|
||||
loadBalancerAPI: &loadBalancersClient{loadBalancerAPI},
|
||||
scaleSetsAPI: &scaleSetsClient{scaleSetsAPI},
|
||||
virtualMachinesAPI: &virtualMachinesClient{virtualMachinesAPI},
|
||||
virtualMachineScaleSetVMsAPI: &virtualMachineScaleSetVMsClient{virtualMachineScaleSetVMsAPI},
|
||||
@ -61,7 +79,7 @@ func NewMetadata(ctx context.Context) (*Metadata, error) {
|
||||
}
|
||||
|
||||
// List retrieves all instances belonging to the current constellation.
|
||||
func (m *Metadata) List(ctx context.Context) ([]core.Instance, error) {
|
||||
func (m *Metadata) List(ctx context.Context) ([]cloudtypes.Instance, error) {
|
||||
providerID, err := m.providerID(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -78,23 +96,23 @@ func (m *Metadata) List(ctx context.Context) ([]core.Instance, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
instances := make([]core.Instance, 0, len(singleInstances)+len(scaleSetInstances))
|
||||
instances := make([]cloudtypes.Instance, 0, len(singleInstances)+len(scaleSetInstances))
|
||||
instances = append(instances, singleInstances...)
|
||||
instances = append(instances, scaleSetInstances...)
|
||||
return instances, nil
|
||||
}
|
||||
|
||||
// Self retrieves the current instance.
|
||||
func (m *Metadata) Self(ctx context.Context) (core.Instance, error) {
|
||||
func (m *Metadata) Self(ctx context.Context) (cloudtypes.Instance, error) {
|
||||
providerID, err := m.providerID(ctx)
|
||||
if err != nil {
|
||||
return core.Instance{}, err
|
||||
return cloudtypes.Instance{}, err
|
||||
}
|
||||
return m.GetInstance(ctx, providerID)
|
||||
}
|
||||
|
||||
// GetInstance retrieves an instance using its providerID.
|
||||
func (m *Metadata) GetInstance(ctx context.Context, providerID string) (core.Instance, error) {
|
||||
func (m *Metadata) GetInstance(ctx context.Context, providerID string) (cloudtypes.Instance, error) {
|
||||
instance, singleErr := m.getVM(ctx, providerID)
|
||||
if singleErr == nil {
|
||||
return instance, nil
|
||||
@ -103,7 +121,7 @@ func (m *Metadata) GetInstance(ctx context.Context, providerID string) (core.Ins
|
||||
if scaleSetErr == nil {
|
||||
return instance, nil
|
||||
}
|
||||
return core.Instance{}, fmt.Errorf("could not retrieve instance given providerID %v as either single vm or scale set vm: %v %v", providerID, singleErr, scaleSetErr)
|
||||
return cloudtypes.Instance{}, fmt.Errorf("could not retrieve instance given providerID %v as either single vm or scale set vm: %v %v", providerID, singleErr, scaleSetErr)
|
||||
}
|
||||
|
||||
// SignalRole signals the constellation role via cloud provider metadata.
|
||||
@ -120,6 +138,135 @@ func (m *Metadata) SignalRole(ctx context.Context, role role.Role) error {
|
||||
return m.setTag(ctx, core.RoleMetadataKey, role.String())
|
||||
}
|
||||
|
||||
// GetNetworkSecurityGroupName returns the security group name of the resource group.
func (m *Metadata) GetNetworkSecurityGroupName(ctx context.Context) (string, error) {
	providerID, err := m.providerID(ctx)
	if err != nil {
		return "", err
	}
	_, resourceGroup, err := extractBasicsFromProviderID(providerID)
	if err != nil {
		return "", err
	}

	nsg, err := m.getNetworkSecurityGroup(ctx, resourceGroup)
	if err != nil {
		return "", err
	}
	if nsg == nil || nsg.Name == nil {
		return "", fmt.Errorf("could not dereference network security group name")
	}
	return *nsg.Name, nil
}

// GetSubnetworkCIDR retrieves the subnetwork CIDR from cloud provider metadata.
func (m *Metadata) GetSubnetworkCIDR(ctx context.Context) (string, error) {
	providerID, err := m.providerID(ctx)
	if err != nil {
		return "", err
	}
	_, resourceGroup, err := extractBasicsFromProviderID(providerID)
	if err != nil {
		return "", err
	}
	virtualNetwork, err := m.getVirtualNetwork(ctx, resourceGroup)
	if err != nil {
		return "", err
	}
	if virtualNetwork == nil || virtualNetwork.Properties == nil || len(virtualNetwork.Properties.Subnets) == 0 ||
		virtualNetwork.Properties.Subnets[0].Properties == nil || virtualNetwork.Properties.Subnets[0].Properties.AddressPrefix == nil {
		return "", fmt.Errorf("could not retrieve subnetwork CIDR from virtual network %v", virtualNetwork)
	}

	return *virtualNetwork.Properties.Subnets[0].Properties.AddressPrefix, nil
}

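Callers get the address prefix back as a plain string; a small, hypothetical validation step is sketched below (exampleValidateSubnetCIDR is illustrative only and assumes the standard library net package is imported in this file):

// exampleValidateSubnetCIDR parses the CIDR returned by GetSubnetworkCIDR so
// malformed metadata is caught before it reaches the network configuration.
func exampleValidateSubnetCIDR(ctx context.Context, m *Metadata) (*net.IPNet, error) {
	cidr, err := m.GetSubnetworkCIDR(ctx)
	if err != nil {
		return nil, err
	}
	_, ipNet, err := net.ParseCIDR(cidr)
	if err != nil {
		return nil, fmt.Errorf("subnetwork CIDR %q is invalid: %w", cidr, err)
	}
	return ipNet, nil
}
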
// getLoadBalancer retrieves the load balancer from cloud provider metadata.
func (m *Metadata) getLoadBalancer(ctx context.Context) (*armnetwork.LoadBalancer, error) {
	providerID, err := m.providerID(ctx)
	if err != nil {
		return nil, err
	}
	_, resourceGroup, err := extractBasicsFromProviderID(providerID)
	if err != nil {
		return nil, err
	}
	pager := m.loadBalancerAPI.List(resourceGroup, nil)

	for pager.NextPage(ctx) {
		for _, lb := range pager.PageResponse().Value {
			if lb != nil && lb.Properties != nil {
				return lb, nil
			}
		}
	}
	return nil, fmt.Errorf("could not get any load balancer")
}

// SupportsLoadBalancer returns true if the cloud provider supports load balancers.
func (m *Metadata) SupportsLoadBalancer() bool {
	return true
}

// GetLoadBalancerName returns the load balancer name of the resource group.
func (m *Metadata) GetLoadBalancerName(ctx context.Context) (string, error) {
	lb, err := m.getLoadBalancer(ctx)
	if err != nil {
		return "", err
	}
	if lb == nil || lb.Name == nil {
		return "", fmt.Errorf("could not dereference load balancer name")
	}
	return *lb.Name, nil
}

// GetLoadBalancerIP retrieves the first load balancer IP from cloud provider metadata.
func (m *Metadata) GetLoadBalancerIP(ctx context.Context) (string, error) {
	lb, err := m.getLoadBalancer(ctx)
	if err != nil {
		return "", err
	}
	if lb == nil || lb.Properties == nil {
		return "", fmt.Errorf("could not dereference load balancer IP configuration")
	}

	var pubIPID string
	for _, fipConf := range lb.Properties.FrontendIPConfigurations {
		if fipConf == nil || fipConf.Properties == nil || fipConf.Properties.PublicIPAddress == nil || fipConf.Properties.PublicIPAddress.ID == nil {
			continue
		}
		pubIPID = *fipConf.Properties.PublicIPAddress.ID
		break
	}

	if pubIPID == "" {
		return "", fmt.Errorf("could not find public IP address reference in load balancer")
	}

	matches := publicIPAddressRegexp.FindStringSubmatch(pubIPID)
	if len(matches) != 2 {
		return "", fmt.Errorf("could not find public IP address name in load balancer: %v", pubIPID)
	}
	pubIPName := matches[1]

	providerID, err := m.providerID(ctx)
	if err != nil {
		return "", err
	}
	_, resourceGroup, err := extractBasicsFromProviderID(providerID)
	if err != nil {
		return "", err
	}
	resp, err := m.publicIPAddressesAPI.Get(ctx, resourceGroup, pubIPName, nil)
	if err != nil {
		return "", fmt.Errorf("could not retrieve public IP address: %w", err)
	}
	if resp.Properties == nil || resp.Properties.IPAddress == nil {
		return "", fmt.Errorf("could not resolve public IP address reference for load balancer")
	}
	return *resp.Properties.IPAddress, nil
}

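The lookup is two-staged: publicIPAddressRegexp pulls the public IP resource name out of the frontend configuration's ARM ID, then the public IP API resolves the actual address. A small, hypothetical demonstration of the first stage (exampleExtractPublicIPName is not part of the commit):

// exampleExtractPublicIPName shows what the capture group of
// publicIPAddressRegexp yields for a well-formed resource ID.
func exampleExtractPublicIPName() string {
	id := "/subscriptions/sub-id/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/my-public-ip"
	matches := publicIPAddressRegexp.FindStringSubmatch(id)
	if len(matches) != 2 {
		return ""
	}
	return matches[1] // "my-public-ip"
}
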
// SetVPNIP stores the internally used VPN IP in cloud provider metadata (not required on azure).
|
||||
func (m *Metadata) SetVPNIP(ctx context.Context, vpnIP string) error {
|
||||
return nil
|
||||
@ -166,7 +313,6 @@ func extractInstanceTags(tags map[string]*string) map[string]string {
|
||||
|
||||
// extractSSHKeys extracts SSH public keys from azure instance OS Profile.
|
||||
func extractSSHKeys(sshConfig armcompute.SSHConfiguration) map[string][]string {
|
||||
keyPathRegexp := regexp.MustCompile(`^\/home\/([^\/]+)\/\.ssh\/authorized_keys$`)
|
||||
sshKeys := map[string][]string{}
|
||||
for _, key := range sshConfig.PublicKeys {
|
||||
if key == nil || key.Path == nil || key.KeyData == nil {
|
||||
|
@ -8,24 +8,24 @@ import (
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork"
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestList(t *testing.T) {
|
||||
wantInstances := []core.Instance{
|
||||
wantInstances := []cloudtypes.Instance{
|
||||
{
|
||||
Name: "instance-name",
|
||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name",
|
||||
IPs: []string{"192.0.2.0"},
|
||||
PrivateIPs: []string{"192.0.2.0"},
|
||||
SSHKeys: map[string][]string{"user": {"key-data"}},
|
||||
},
|
||||
{
|
||||
Name: "scale-set-name-instance-id",
|
||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
||||
IPs: []string{"192.0.2.0"},
|
||||
PrivateIPs: []string{"192.0.2.0"},
|
||||
SSHKeys: map[string][]string{"user": {"key-data"}},
|
||||
},
|
||||
}
|
||||
@ -37,7 +37,7 @@ func TestList(t *testing.T) {
|
||||
virtualMachineScaleSetVMsAPI virtualMachineScaleSetVMsAPI
|
||||
tagsAPI tagsAPI
|
||||
wantErr bool
|
||||
wantInstances []core.Instance
|
||||
wantInstances []cloudtypes.Instance
|
||||
}{
|
||||
"List works": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
@ -98,16 +98,16 @@ func TestList(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSelf(t *testing.T) {
|
||||
wantVMInstance := core.Instance{
|
||||
wantVMInstance := cloudtypes.Instance{
|
||||
Name: "instance-name",
|
||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name",
|
||||
IPs: []string{"192.0.2.0"},
|
||||
PrivateIPs: []string{"192.0.2.0"},
|
||||
SSHKeys: map[string][]string{"user": {"key-data"}},
|
||||
}
|
||||
wantScaleSetInstance := core.Instance{
|
||||
wantScaleSetInstance := cloudtypes.Instance{
|
||||
Name: "scale-set-name-instance-id",
|
||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
||||
IPs: []string{"192.0.2.0"},
|
||||
PrivateIPs: []string{"192.0.2.0"},
|
||||
SSHKeys: map[string][]string{"user": {"key-data"}},
|
||||
}
|
||||
testCases := map[string]struct {
|
||||
@ -116,7 +116,7 @@ func TestSelf(t *testing.T) {
|
||||
virtualMachinesAPI virtualMachinesAPI
|
||||
virtualMachineScaleSetVMsAPI virtualMachineScaleSetVMsAPI
|
||||
wantErr bool
|
||||
wantInstance core.Instance
|
||||
wantInstance cloudtypes.Instance
|
||||
}{
|
||||
"self for individual instance works": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
@ -210,6 +210,349 @@ func TestSignalRole(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetNetworkSecurityGroupName(t *testing.T) {
|
||||
name := "network-security-group-name"
|
||||
testCases := map[string]struct {
|
||||
securityGroupsAPI securityGroupsAPI
|
||||
imdsAPI imdsAPI
|
||||
wantName string
|
||||
wantErr bool
|
||||
}{
|
||||
"GetNetworkSecurityGroupName works": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
securityGroupsAPI: &stubSecurityGroupsAPI{
|
||||
listPages: [][]*armnetwork.SecurityGroup{
|
||||
{
|
||||
{
|
||||
Name: to.StringPtr(name),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantName: name,
|
||||
},
|
||||
"no security group": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
securityGroupsAPI: &stubSecurityGroupsAPI{},
|
||||
wantErr: true,
|
||||
},
|
||||
"missing name in security group struct": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
securityGroupsAPI: &stubSecurityGroupsAPI{listPages: [][]*armnetwork.SecurityGroup{{{}}}},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
metadata := Metadata{
|
||||
imdsAPI: tc.imdsAPI,
|
||||
securityGroupsAPI: tc.securityGroupsAPI,
|
||||
}
|
||||
name, err := metadata.GetNetworkSecurityGroupName(context.Background())
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
assert.Equal(tc.wantName, name)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetSubnetworkCIDR(t *testing.T) {
|
||||
subnetworkCIDR := "192.0.2.0/24"
|
||||
name := "name"
|
||||
testCases := map[string]struct {
|
||||
virtualNetworksAPI virtualNetworksAPI
|
||||
imdsAPI imdsAPI
|
||||
wantNetworkCIDR string
|
||||
wantErr bool
|
||||
}{
|
||||
"GetSubnetworkCIDR works": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
virtualNetworksAPI: &stubVirtualNetworksAPI{listPages: [][]*armnetwork.VirtualNetwork{
|
||||
{
|
||||
{
|
||||
Name: to.StringPtr(name),
|
||||
Properties: &armnetwork.VirtualNetworkPropertiesFormat{
|
||||
Subnets: []*armnetwork.Subnet{
|
||||
{Properties: &armnetwork.SubnetPropertiesFormat{AddressPrefix: to.StringPtr(subnetworkCIDR)}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
wantNetworkCIDR: subnetworkCIDR,
|
||||
},
|
||||
"no virtual networks found": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
virtualNetworksAPI: &stubVirtualNetworksAPI{listPages: [][]*armnetwork.VirtualNetwork{
|
||||
{},
|
||||
}},
|
||||
wantErr: true,
|
||||
wantNetworkCIDR: subnetworkCIDR,
|
||||
},
|
||||
"malformed network struct": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
virtualNetworksAPI: &stubVirtualNetworksAPI{listPages: [][]*armnetwork.VirtualNetwork{
|
||||
{
|
||||
{},
|
||||
},
|
||||
}},
|
||||
wantErr: true,
|
||||
wantNetworkCIDR: subnetworkCIDR,
|
||||
},
|
||||
}
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
metadata := Metadata{
|
||||
imdsAPI: tc.imdsAPI,
|
||||
virtualNetworksAPI: tc.virtualNetworksAPI,
|
||||
}
|
||||
subnetworkCIDR, err := metadata.GetSubnetworkCIDR(context.Background())
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
assert.Equal(tc.wantNetworkCIDR, subnetworkCIDR)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetLoadBalancerName(t *testing.T) {
|
||||
loadBalancerName := "load-balancer-name"
|
||||
testCases := map[string]struct {
|
||||
loadBalancerAPI loadBalancerAPI
|
||||
imdsAPI imdsAPI
|
||||
wantName string
|
||||
wantErr bool
|
||||
}{
|
||||
"GetLoadBalancerName works": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
loadBalancerAPI: &stubLoadBalancersAPI{
|
||||
listPages: [][]*armnetwork.LoadBalancer{
|
||||
{
|
||||
{
|
||||
Name: to.StringPtr(loadBalancerName),
|
||||
Properties: &armnetwork.LoadBalancerPropertiesFormat{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantName: loadBalancerName,
|
||||
},
|
||||
"invalid load balancer struct": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
loadBalancerAPI: &stubLoadBalancersAPI{listPages: [][]*armnetwork.LoadBalancer{{{}}}},
|
||||
wantErr: true,
|
||||
},
|
||||
"invalid missing name": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
loadBalancerAPI: &stubLoadBalancersAPI{listPages: [][]*armnetwork.LoadBalancer{{{
|
||||
Properties: &armnetwork.LoadBalancerPropertiesFormat{},
|
||||
}}}},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
metadata := Metadata{
|
||||
imdsAPI: tc.imdsAPI,
|
||||
loadBalancerAPI: tc.loadBalancerAPI,
|
||||
}
|
||||
loadbalancerName, err := metadata.GetLoadBalancerName(context.Background())
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
assert.Equal(tc.wantName, loadbalancerName)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetLoadBalancerIP(t *testing.T) {
|
||||
loadBalancerName := "load-balancer-name"
|
||||
publicIP := "192.0.2.1"
|
||||
correctPublicIPID := "/subscriptions/subscription/resourceGroups/resourceGroup/providers/Microsoft.Network/publicIPAddresses/pubIPName"
|
||||
someErr := errors.New("some error")
|
||||
testCases := map[string]struct {
|
||||
loadBalancerAPI loadBalancerAPI
|
||||
publicIPAddressesAPI publicIPAddressesAPI
|
||||
imdsAPI imdsAPI
|
||||
wantIP string
|
||||
wantErr bool
|
||||
}{
|
||||
"GetLoadBalancerIP works": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
loadBalancerAPI: &stubLoadBalancersAPI{
|
||||
listPages: [][]*armnetwork.LoadBalancer{
|
||||
{
|
||||
{
|
||||
Name: to.StringPtr(loadBalancerName),
|
||||
Properties: &armnetwork.LoadBalancerPropertiesFormat{
|
||||
FrontendIPConfigurations: []*armnetwork.FrontendIPConfiguration{
|
||||
{
|
||||
Properties: &armnetwork.FrontendIPConfigurationPropertiesFormat{
|
||||
PublicIPAddress: &armnetwork.PublicIPAddress{
|
||||
ID: &correctPublicIPID,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
publicIPAddressesAPI: &stubPublicIPAddressesAPI{getResponse: armnetwork.PublicIPAddressesClientGetResponse{
|
||||
PublicIPAddressesClientGetResult: armnetwork.PublicIPAddressesClientGetResult{
|
||||
PublicIPAddress: armnetwork.PublicIPAddress{
|
||||
Properties: &armnetwork.PublicIPAddressPropertiesFormat{
|
||||
IPAddress: &publicIP,
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
wantIP: publicIP,
|
||||
},
|
||||
"no load balancer": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
loadBalancerAPI: &stubLoadBalancersAPI{
|
||||
listPages: [][]*armnetwork.LoadBalancer{
|
||||
{},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
"load balancer missing public IP reference": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
loadBalancerAPI: &stubLoadBalancersAPI{
|
||||
listPages: [][]*armnetwork.LoadBalancer{
|
||||
{
|
||||
{
|
||||
Name: to.StringPtr(loadBalancerName),
|
||||
Properties: &armnetwork.LoadBalancerPropertiesFormat{
|
||||
FrontendIPConfigurations: []*armnetwork.FrontendIPConfiguration{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
"public IP reference has wrong format": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
loadBalancerAPI: &stubLoadBalancersAPI{
|
||||
listPages: [][]*armnetwork.LoadBalancer{
|
||||
{
|
||||
{
|
||||
Name: to.StringPtr(loadBalancerName),
|
||||
Properties: &armnetwork.LoadBalancerPropertiesFormat{
|
||||
FrontendIPConfigurations: []*armnetwork.FrontendIPConfiguration{
|
||||
{
|
||||
Properties: &armnetwork.FrontendIPConfigurationPropertiesFormat{
|
||||
PublicIPAddress: &armnetwork.PublicIPAddress{
|
||||
ID: to.StringPtr("wrong-format"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
"no public IP address found": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
loadBalancerAPI: &stubLoadBalancersAPI{
|
||||
listPages: [][]*armnetwork.LoadBalancer{
|
||||
{
|
||||
{
|
||||
Name: to.StringPtr(loadBalancerName),
|
||||
Properties: &armnetwork.LoadBalancerPropertiesFormat{
|
||||
FrontendIPConfigurations: []*armnetwork.FrontendIPConfiguration{
|
||||
{
|
||||
Properties: &armnetwork.FrontendIPConfigurationPropertiesFormat{
|
||||
PublicIPAddress: &armnetwork.PublicIPAddress{
|
||||
ID: &correctPublicIPID,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
publicIPAddressesAPI: &stubPublicIPAddressesAPI{getErr: someErr},
|
||||
wantErr: true,
|
||||
},
|
||||
"found public IP has no address field": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
loadBalancerAPI: &stubLoadBalancersAPI{
|
||||
listPages: [][]*armnetwork.LoadBalancer{
|
||||
{
|
||||
{
|
||||
Name: to.StringPtr(loadBalancerName),
|
||||
Properties: &armnetwork.LoadBalancerPropertiesFormat{
|
||||
FrontendIPConfigurations: []*armnetwork.FrontendIPConfiguration{
|
||||
{
|
||||
Properties: &armnetwork.FrontendIPConfigurationPropertiesFormat{
|
||||
PublicIPAddress: &armnetwork.PublicIPAddress{
|
||||
ID: &correctPublicIPID,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
publicIPAddressesAPI: &stubPublicIPAddressesAPI{getResponse: armnetwork.PublicIPAddressesClientGetResponse{
|
||||
PublicIPAddressesClientGetResult: armnetwork.PublicIPAddressesClientGetResult{
|
||||
PublicIPAddress: armnetwork.PublicIPAddress{
|
||||
Properties: &armnetwork.PublicIPAddressPropertiesFormat{},
|
||||
},
|
||||
},
|
||||
}},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
metadata := Metadata{
|
||||
imdsAPI: tc.imdsAPI,
|
||||
loadBalancerAPI: tc.loadBalancerAPI,
|
||||
publicIPAddressesAPI: tc.publicIPAddressesAPI,
|
||||
}
|
||||
loadBalancerIP, err := metadata.GetLoadBalancerIP(context.Background())
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
assert.Equal(tc.wantIP, loadBalancerIP)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetVPNIP(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
metadata := Metadata{}
|
||||
|
@ -11,53 +11,81 @@ import (
|
||||
)
|
||||
|
||||
// getVMInterfaces retrieves all network interfaces referenced by a virtual machine.
|
||||
func (m *Metadata) getVMInterfaces(ctx context.Context, vm armcompute.VirtualMachine, resourceGroup string) ([]*armnetwork.InterfaceIPConfiguration, error) {
|
||||
func (m *Metadata) getVMInterfaces(ctx context.Context, vm armcompute.VirtualMachine, resourceGroup string) ([]armnetwork.Interface, error) {
|
||||
if vm.Properties == nil || vm.Properties.NetworkProfile == nil {
|
||||
return []*armnetwork.InterfaceIPConfiguration{}, nil
|
||||
return []armnetwork.Interface{}, nil
|
||||
}
|
||||
interfaceNames := extractInterfaceNamesFromInterfaceReferences(vm.Properties.NetworkProfile.NetworkInterfaces)
|
||||
interfaceIPConfigurations := []*armnetwork.InterfaceIPConfiguration{}
|
||||
networkInterfaces := []armnetwork.Interface{}
|
||||
for _, interfaceName := range interfaceNames {
|
||||
networkInterfacesResp, err := m.networkInterfacesAPI.Get(ctx, resourceGroup, interfaceName, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to retrieve network interface %v: %w", interfaceName, err)
|
||||
}
|
||||
if networkInterfacesResp.Interface.Properties == nil || networkInterfacesResp.Interface.Properties.IPConfigurations == nil {
|
||||
return nil, errors.New("retrieved network interface has invalid ip configuration")
|
||||
}
|
||||
interfaceIPConfigurations = append(interfaceIPConfigurations, networkInterfacesResp.Properties.IPConfigurations...)
|
||||
networkInterfaces = append(networkInterfaces, networkInterfacesResp.Interface)
|
||||
}
|
||||
return interfaceIPConfigurations, nil
|
||||
return networkInterfaces, nil
|
||||
}
|
||||
|
||||
// getScaleSetVMInterfaces retrieves all network interfaces referenced by a scale set virtual machine.
|
||||
func (m *Metadata) getScaleSetVMInterfaces(ctx context.Context, vm armcompute.VirtualMachineScaleSetVM, resourceGroup, scaleSet, instanceID string) ([]*armnetwork.InterfaceIPConfiguration, error) {
|
||||
func (m *Metadata) getScaleSetVMInterfaces(ctx context.Context, vm armcompute.VirtualMachineScaleSetVM, resourceGroup, scaleSet, instanceID string) ([]armnetwork.Interface, error) {
|
||||
if vm.Properties == nil || vm.Properties.NetworkProfile == nil {
|
||||
return []*armnetwork.InterfaceIPConfiguration{}, nil
|
||||
return []armnetwork.Interface{}, nil
|
||||
}
|
||||
interfaceNames := extractInterfaceNamesFromInterfaceReferences(vm.Properties.NetworkProfile.NetworkInterfaces)
|
||||
interfaceIPConfigurations := []*armnetwork.InterfaceIPConfiguration{}
|
||||
networkInterfaces := []armnetwork.Interface{}
|
||||
for _, interfaceName := range interfaceNames {
|
||||
networkInterfacesResp, err := m.networkInterfacesAPI.GetVirtualMachineScaleSetNetworkInterface(ctx, resourceGroup, scaleSet, instanceID, interfaceName, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to retrieve network interface %v: %w", interfaceName, err)
|
||||
}
|
||||
if networkInterfacesResp.Interface.Properties == nil || networkInterfacesResp.Interface.Properties.IPConfigurations == nil {
|
||||
return nil, errors.New("retrieved network interface has invalid ip configuration")
|
||||
}
|
||||
interfaceIPConfigurations = append(interfaceIPConfigurations, networkInterfacesResp.Properties.IPConfigurations...)
|
||||
networkInterfaces = append(networkInterfaces, networkInterfacesResp.Interface)
|
||||
}
|
||||
return interfaceIPConfigurations, nil
|
||||
return networkInterfaces, nil
|
||||
}
|
||||
|
||||
// getScaleSetVMPublicIPAddresses retrieves all public IP addresses from a network interface which is referenced by a scale set virtual machine.
|
||||
func (m *Metadata) getScaleSetVMPublicIPAddresses(ctx context.Context, resourceGroup, scaleSet, instanceID string,
|
||||
networkInterfaces []armnetwork.Interface,
|
||||
) ([]string, error) {
|
||||
var publicIPAddresses []string
|
||||
for _, networkInterface := range networkInterfaces {
|
||||
if networkInterface.Properties == nil || networkInterface.Name == nil {
|
||||
continue
|
||||
}
|
||||
for _, config := range networkInterface.Properties.IPConfigurations {
|
||||
if config == nil || config.Properties == nil || config.Properties.PublicIPAddress == nil || config.Name == nil {
|
||||
continue
|
||||
}
|
||||
publicIPAddressName := *config.Properties.PublicIPAddress.ID
|
||||
publicIPAddressNameParts := strings.Split(publicIPAddressName, "/")
|
||||
publicIPAddressName = publicIPAddressNameParts[len(publicIPAddressNameParts)-1]
|
||||
publicIPAddress, err := m.publicIPAddressesAPI.GetVirtualMachineScaleSetPublicIPAddress(ctx, resourceGroup, scaleSet, instanceID, *networkInterface.Name, *config.Name, publicIPAddressName, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to retrieve public ip address %v: %w", publicIPAddressName, err)
|
||||
}
|
||||
if publicIPAddress.Properties == nil || publicIPAddress.Properties.IPAddress == nil {
|
||||
return nil, errors.New("retrieved public ip address has invalid ip address")
|
||||
}
|
||||
publicIPAddresses = append(publicIPAddresses, *publicIPAddress.Properties.IPAddress)
|
||||
}
|
||||
}
|
||||
return publicIPAddresses, nil
|
||||
}
|
||||
|
||||
// extractPrivateIPs extracts private IPs from a list of network interface IP configurations.
|
||||
func extractPrivateIPs(interfaceIPConfigs []*armnetwork.InterfaceIPConfiguration) []string {
|
||||
func extractPrivateIPs(networkInterfaces []armnetwork.Interface) []string {
|
||||
addresses := []string{}
|
||||
for _, config := range interfaceIPConfigs {
|
||||
if config == nil || config.Properties == nil || config.Properties.PrivateIPAddress == nil {
|
||||
for _, networkInterface := range networkInterfaces {
|
||||
if networkInterface.Properties == nil || len(networkInterface.Properties.IPConfigurations) == 0 {
|
||||
continue
|
||||
}
|
||||
addresses = append(addresses, *config.Properties.PrivateIPAddress)
|
||||
for _, config := range networkInterface.Properties.IPConfigurations {
|
||||
if config == nil || config.Properties == nil || config.Properties.PrivateIPAddress == nil {
|
||||
continue
|
||||
}
|
||||
addresses = append(addresses, *config.Properties.PrivateIPAddress)
|
||||
}
|
||||
}
|
||||
return addresses
|
||||
}
|
||||
|
@ -2,6 +2,7 @@ package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
|
||||
@ -12,10 +13,17 @@ import (
|
||||
)
|
||||
|
||||
func TestGetVMInterfaces(t *testing.T) {
|
||||
wantConfigs := []*armnetwork.InterfaceIPConfiguration{
|
||||
wantNetworkInterfaces := []armnetwork.Interface{
|
||||
{
|
||||
Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
|
||||
PrivateIPAddress: to.StringPtr("192.0.2.0"),
|
||||
Name: to.StringPtr("interface-name"),
|
||||
Properties: &armnetwork.InterfacePropertiesFormat{
|
||||
IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
|
||||
{
|
||||
Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
|
||||
PrivateIPAddress: to.StringPtr("192.0.2.0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -31,25 +39,53 @@ func TestGetVMInterfaces(t *testing.T) {
|
||||
},
|
||||
}
|
||||
testCases := map[string]struct {
|
||||
vm armcompute.VirtualMachine
|
||||
networkInterfacesAPI networkInterfacesAPI
|
||||
wantErr bool
|
||||
wantConfigs []*armnetwork.InterfaceIPConfiguration
|
||||
vm armcompute.VirtualMachine
|
||||
networkInterfacesAPI networkInterfacesAPI
|
||||
wantErr bool
|
||||
wantNetworkInterfaces []armnetwork.Interface
|
||||
}{
|
||||
"retrieval works": {
|
||||
vm: vm,
|
||||
networkInterfacesAPI: newNetworkInterfacesStub(),
|
||||
wantConfigs: wantConfigs,
|
||||
vm: vm,
|
||||
networkInterfacesAPI: &stubNetworkInterfacesAPI{
|
||||
getInterface: armnetwork.Interface{
|
||||
Name: to.StringPtr("interface-name"),
|
||||
Properties: &armnetwork.InterfacePropertiesFormat{
|
||||
IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
|
||||
{
|
||||
Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
|
||||
PrivateIPAddress: to.StringPtr("192.0.2.0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantNetworkInterfaces: wantNetworkInterfaces,
|
||||
},
|
||||
"vm can have 0 interfaces": {
|
||||
vm: armcompute.VirtualMachine{},
|
||||
networkInterfacesAPI: newNetworkInterfacesStub(),
|
||||
wantConfigs: []*armnetwork.InterfaceIPConfiguration{},
|
||||
vm: armcompute.VirtualMachine{},
|
||||
networkInterfacesAPI: &stubNetworkInterfacesAPI{
|
||||
getInterface: armnetwork.Interface{
|
||||
Name: to.StringPtr("interface-name"),
|
||||
Properties: &armnetwork.InterfacePropertiesFormat{
|
||||
IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
|
||||
{
|
||||
Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
|
||||
PrivateIPAddress: to.StringPtr("192.0.2.0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantNetworkInterfaces: []armnetwork.Interface{},
|
||||
},
|
||||
"interface retrieval fails": {
|
||||
vm: vm,
|
||||
networkInterfacesAPI: newFailingNetworkInterfacesStub(),
|
||||
wantErr: true,
|
||||
vm: vm,
|
||||
networkInterfacesAPI: &stubNetworkInterfacesAPI{
|
||||
getErr: errors.New("get err"),
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
@ -61,23 +97,30 @@ func TestGetVMInterfaces(t *testing.T) {
|
||||
metadata := Metadata{
|
||||
networkInterfacesAPI: tc.networkInterfacesAPI,
|
||||
}
|
||||
configs, err := metadata.getVMInterfaces(context.Background(), tc.vm, "resource-group")
|
||||
vmNetworkInteraces, err := metadata.getVMInterfaces(context.Background(), tc.vm, "resource-group")
|
||||
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
assert.Equal(tc.wantConfigs, configs)
|
||||
assert.Equal(tc.wantNetworkInterfaces, vmNetworkInterfaces)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetScaleSetVMInterfaces(t *testing.T) {
|
||||
wantConfigs := []*armnetwork.InterfaceIPConfiguration{
|
||||
wantNetworkInterfaces := []armnetwork.Interface{
|
||||
{
|
||||
Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
|
||||
PrivateIPAddress: to.StringPtr("192.0.2.0"),
|
||||
Name: to.StringPtr("interface-name"),
|
||||
Properties: &armnetwork.InterfacePropertiesFormat{
|
||||
IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
|
||||
{
|
||||
Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
|
||||
PrivateIPAddress: to.StringPtr("192.0.2.0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -93,25 +136,53 @@ func TestGetScaleSetVMInterfaces(t *testing.T) {
|
||||
},
|
||||
}
|
||||
testCases := map[string]struct {
|
||||
vm armcompute.VirtualMachineScaleSetVM
|
||||
networkInterfacesAPI networkInterfacesAPI
|
||||
wantErr bool
|
||||
wantConfigs []*armnetwork.InterfaceIPConfiguration
|
||||
vm armcompute.VirtualMachineScaleSetVM
|
||||
networkInterfacesAPI networkInterfacesAPI
|
||||
wantErr bool
|
||||
wantNetworkInterfaces []armnetwork.Interface
|
||||
}{
|
||||
"retrieval works": {
|
||||
vm: vm,
|
||||
networkInterfacesAPI: newNetworkInterfacesStub(),
|
||||
wantConfigs: wantConfigs,
|
||||
vm: vm,
|
||||
networkInterfacesAPI: &stubNetworkInterfacesAPI{
|
||||
getInterface: armnetwork.Interface{
|
||||
Name: to.StringPtr("interface-name"),
|
||||
Properties: &armnetwork.InterfacePropertiesFormat{
|
||||
IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
|
||||
{
|
||||
Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
|
||||
PrivateIPAddress: to.StringPtr("192.0.2.0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantNetworkInterfaces: wantNetworkInterfaces,
|
||||
},
|
||||
"vm can have 0 interfaces": {
|
||||
vm: armcompute.VirtualMachineScaleSetVM{},
|
||||
networkInterfacesAPI: newNetworkInterfacesStub(),
|
||||
wantConfigs: []*armnetwork.InterfaceIPConfiguration{},
|
||||
vm: armcompute.VirtualMachineScaleSetVM{},
|
||||
networkInterfacesAPI: &stubNetworkInterfacesAPI{
|
||||
getInterface: armnetwork.Interface{
|
||||
Name: to.StringPtr("interface-name"),
|
||||
Properties: &armnetwork.InterfacePropertiesFormat{
|
||||
IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
|
||||
{
|
||||
Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
|
||||
PrivateIPAddress: to.StringPtr("192.0.2.0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantNetworkInterfaces: []armnetwork.Interface{},
|
||||
},
|
||||
"interface retrieval fails": {
|
||||
vm: vm,
|
||||
networkInterfacesAPI: newFailingNetworkInterfacesStub(),
|
||||
wantErr: true,
|
||||
vm: vm,
|
||||
networkInterfacesAPI: &stubNetworkInterfacesAPI{
|
||||
getErr: errors.New("get err"),
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
@ -130,33 +201,147 @@ func TestGetScaleSetVMInterfaces(t *testing.T) {
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
assert.Equal(tc.wantConfigs, configs)
|
||||
assert.Equal(tc.wantNetworkInterfaces, configs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetScaleSetVMPublicIPAddresses(t *testing.T) {
|
||||
someErr := errors.New("some err")
|
||||
newNetworkInterfaces := func() []armnetwork.Interface {
|
||||
return []armnetwork.Interface{{
|
            Name: to.StringPtr("interface-name"),
            Properties: &armnetwork.InterfacePropertiesFormat{
                IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
                    {
                        Name: to.StringPtr("ip-config-name"),
                        Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
                            PublicIPAddress: &armnetwork.PublicIPAddress{
                                ID: to.StringPtr("/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Network/publicIPAddresses/public-ip-name"),
                            },
                        },
                    },
                },
            },
        }, {
            Name: to.StringPtr("interface-name2"),
            Properties: &armnetwork.InterfacePropertiesFormat{
                IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
                    {
                        Name: to.StringPtr("ip-config-name2"),
                        Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
                            PublicIPAddress: &armnetwork.PublicIPAddress{
                                ID: to.StringPtr("/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Network/publicIPAddresses/public-ip-name2"),
                            },
                        },
                    },
                },
            },
        }}
    }

    testCases := map[string]struct {
        networkInterfacesMutator func(*[]armnetwork.Interface)
        networkInterfaces []armnetwork.Interface
        publicIPAddressesAPI publicIPAddressesAPI
        wantIPs []string
        wantErr bool
    }{
        "retrieval works": {
            publicIPAddressesAPI: &stubPublicIPAddressesAPI{getVirtualMachineScaleSetPublicIPAddressResponse: armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResponse{
                PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResult: armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResult{
                    PublicIPAddress: armnetwork.PublicIPAddress{
                        Properties: &armnetwork.PublicIPAddressPropertiesFormat{
                            IPAddress: to.StringPtr("192.0.2.1"),
                        },
                    },
                },
            }},
            networkInterfaces: newNetworkInterfaces(),
            wantIPs: []string{"192.0.2.1", "192.0.2.1"},
        },
        "retrieval works for no valid interfaces": {
            publicIPAddressesAPI: &stubPublicIPAddressesAPI{getVirtualMachineScaleSetPublicIPAddressResponse: armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResponse{
                PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResult: armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResult{
                    PublicIPAddress: armnetwork.PublicIPAddress{
                        Properties: &armnetwork.PublicIPAddressPropertiesFormat{
                            IPAddress: to.StringPtr("192.0.2.1"),
                        },
                    },
                },
            }},
            networkInterfaces: newNetworkInterfaces(),
            networkInterfacesMutator: func(nets *[]armnetwork.Interface) {
                (*nets)[0].Properties.IPConfigurations = []*armnetwork.InterfaceIPConfiguration{nil}
                (*nets)[1] = armnetwork.Interface{Name: nil}
            },
        },
        "fail to get public IP": {
            publicIPAddressesAPI: &stubPublicIPAddressesAPI{getErr: someErr},
            networkInterfaces: newNetworkInterfaces(),
            wantErr: true,
        },
        "fail to parse IPv4 address of public IP": {
            publicIPAddressesAPI: &stubPublicIPAddressesAPI{getVirtualMachineScaleSetPublicIPAddressResponse: armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResponse{
                PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResult: armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResult{
                    PublicIPAddress: armnetwork.PublicIPAddress{},
                },
            }},
            networkInterfaces: newNetworkInterfaces(),
            wantErr: true,
        },
    }

    for name, tc := range testCases {
        t.Run(name, func(t *testing.T) {
            assert := assert.New(t)
            require := require.New(t)
            if tc.networkInterfacesMutator != nil {
                tc.networkInterfacesMutator(&tc.networkInterfaces)
            }

            metadata := Metadata{
                publicIPAddressesAPI: tc.publicIPAddressesAPI,
            }

            ips, err := metadata.getScaleSetVMPublicIPAddresses(context.Background(), "resource-group", "scale-set-name", "instance-id", tc.networkInterfaces)

            if tc.wantErr {
                assert.Error(err)
                return
            }
            require.NoError(err)
            assert.Equal(tc.wantIPs, ips)
        })
    }
}

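The cases above drive getScaleSetVMPublicIPAddresses, whose body is not part of the hunks shown here. As a rough sketch only, under the assumption that it walks the interface list and resolves each IP configuration through the publicIPAddressesAPI wrapper defined later in this diff; the helper name, nil checks, and error text are illustrative, not the committed implementation, and imports such as "strings" and "errors" are assumed from the surrounding package.

// Illustrative sketch, not the committed code.
func (m *Metadata) getScaleSetVMPublicIPAddressesSketch(ctx context.Context, resourceGroup, scaleSet, instanceID string,
    networkInterfaces []armnetwork.Interface,
) ([]string, error) {
    addresses := []string{}
    for _, nic := range networkInterfaces {
        if nic.Name == nil || nic.Properties == nil {
            continue // the tests mutate interfaces to nil name / nil configs; those must be skipped
        }
        for _, ipConfig := range nic.Properties.IPConfigurations {
            if ipConfig == nil || ipConfig.Name == nil || ipConfig.Properties == nil ||
                ipConfig.Properties.PublicIPAddress == nil || ipConfig.Properties.PublicIPAddress.ID == nil {
                continue
            }
            // The public IP name is the last segment of the resource ID in the fixture above.
            idParts := strings.Split(*ipConfig.Properties.PublicIPAddress.ID, "/")
            publicIPName := idParts[len(idParts)-1]
            resp, err := m.publicIPAddressesAPI.GetVirtualMachineScaleSetPublicIPAddress(
                ctx, resourceGroup, scaleSet, instanceID, *nic.Name, *ipConfig.Name, publicIPName, nil)
            if err != nil {
                return nil, err // "fail to get public IP" case
            }
            if resp.PublicIPAddress.Properties == nil || resp.PublicIPAddress.Properties.IPAddress == nil {
                return nil, errors.New("could not parse public IP address") // "fail to parse IPv4 address" case
            }
            addresses = append(addresses, *resp.PublicIPAddress.Properties.IPAddress)
        }
    }
    return addresses, nil
}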
func TestExtractPrivateIPs(t *testing.T) {
    testCases := map[string]struct {
        interfaceIPConfigs []*armnetwork.InterfaceIPConfiguration
        wantIPs []string
        networkInterfaces []armnetwork.Interface
        wantIPs []string
    }{
        "extraction works": {
            interfaceIPConfigs: []*armnetwork.InterfaceIPConfiguration{
            networkInterfaces: []armnetwork.Interface{
                {
                    Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
                        PrivateIPAddress: to.StringPtr("192.0.2.0"),
                    Properties: &armnetwork.InterfacePropertiesFormat{
                        IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
                            {
                                Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
                                    PrivateIPAddress: to.StringPtr("192.0.2.0"),
                                },
                            },
                        },
                    },
                },
            },
            wantIPs: []string{"192.0.2.0"},
        },
        "can be empty": {
            interfaceIPConfigs: []*armnetwork.InterfaceIPConfiguration{},
            networkInterfaces: []armnetwork.Interface{},
        },
        "invalid interface is skipped": {
            interfaceIPConfigs: []*armnetwork.InterfaceIPConfiguration{
                {},
            },
            networkInterfaces: []armnetwork.Interface{{}},
        },
    }

@ -164,7 +349,7 @@ func TestExtractPrivateIPs(t *testing.T) {
        t.Run(name, func(t *testing.T) {
            assert := assert.New(t)

            ips := extractPrivateIPs(tc.interfaceIPConfigs)
            ips := extractPrivateIPs(tc.networkInterfaces)

            assert.ElementsMatch(tc.wantIPs, ips)
        })
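extractPrivateIPs now takes []armnetwork.Interface instead of a flat IP-configuration slice; its body is outside the hunks shown here. A minimal nil-safe sketch that would satisfy the three cases above (the committed implementation may differ in details):

// Sketch only; mirrors what the test cases above require.
func extractPrivateIPsSketch(networkInterfaces []armnetwork.Interface) []string {
    addrs := []string{}
    for _, netInterface := range networkInterfaces {
        if netInterface.Properties == nil {
            continue // "invalid interface is skipped"
        }
        for _, config := range netInterface.Properties.IPConfigurations {
            if config == nil || config.Properties == nil || config.Properties.PrivateIPAddress == nil {
                continue
            }
            addrs = append(addrs, *config.Properties.PrivateIPAddress)
        }
    }
    return addrs
}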
@ -7,31 +7,41 @@ import (

    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork"
    "github.com/edgelesssys/constellation/coordinator/core"
    "github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
    "github.com/edgelesssys/constellation/coordinator/role"
)

var (
    azureVMSSProviderIDRegexp = regexp.MustCompile(`^azure:///subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/Microsoft.Compute/virtualMachineScaleSets/([^/]+)/virtualMachines/([^/]+)$`)
    coordinatorScaleSetRegexp = regexp.MustCompile(`constellation-scale-set-coordinators-[0-9a-zA-Z]+$`)
    nodeScaleSetRegexp = regexp.MustCompile(`constellation-scale-set-nodes-[0-9a-zA-Z]+$`)
)

// getScaleSetVM tries to get an azure vm belonging to a scale set.
func (m *Metadata) getScaleSetVM(ctx context.Context, providerID string) (core.Instance, error) {
func (m *Metadata) getScaleSetVM(ctx context.Context, providerID string) (cloudtypes.Instance, error) {
    _, resourceGroup, scaleSet, instanceID, err := splitScaleSetProviderID(providerID)
    if err != nil {
        return core.Instance{}, err
        return cloudtypes.Instance{}, err
    }
    vmResp, err := m.virtualMachineScaleSetVMsAPI.Get(ctx, resourceGroup, scaleSet, instanceID, nil)
    if err != nil {
        return core.Instance{}, err
        return cloudtypes.Instance{}, err
    }
    interfaceIPConfigurations, err := m.getScaleSetVMInterfaces(ctx, vmResp.VirtualMachineScaleSetVM, resourceGroup, scaleSet, instanceID)
    networkInterfaces, err := m.getScaleSetVMInterfaces(ctx, vmResp.VirtualMachineScaleSetVM, resourceGroup, scaleSet, instanceID)
    if err != nil {
        return core.Instance{}, err
        return cloudtypes.Instance{}, err
    }
    publicIPAddresses, err := m.getScaleSetVMPublicIPAddresses(ctx, resourceGroup, scaleSet, instanceID, networkInterfaces)
    if err != nil {
        return cloudtypes.Instance{}, err
    }

    return convertScaleSetVMToCoreInstance(scaleSet, vmResp.VirtualMachineScaleSetVM, interfaceIPConfigurations)
    return convertScaleSetVMToCoreInstance(scaleSet, vmResp.VirtualMachineScaleSetVM, networkInterfaces, publicIPAddresses)
}

// listScaleSetVMs lists all scale set VMs in the current resource group.
func (m *Metadata) listScaleSetVMs(ctx context.Context, resourceGroup string) ([]core.Instance, error) {
    instances := []core.Instance{}
func (m *Metadata) listScaleSetVMs(ctx context.Context, resourceGroup string) ([]cloudtypes.Instance, error) {
    instances := []cloudtypes.Instance{}
    scaleSetPager := m.scaleSetsAPI.List(resourceGroup, nil)
    for scaleSetPager.NextPage(ctx) {
        for _, scaleSet := range scaleSetPager.PageResponse().Value {
@ -48,7 +58,7 @@ func (m *Metadata) listScaleSetVMs(ctx context.Context, resourceGroup string) ([
            if err != nil {
                return nil, err
            }
            instance, err := convertScaleSetVMToCoreInstance(*scaleSet.Name, *vm, interfaces)
            instance, err := convertScaleSetVMToCoreInstance(*scaleSet.Name, *vm, interfaces, nil)
            if err != nil {
                return nil, err
            }
@ -64,9 +74,7 @@ func (m *Metadata) listScaleSetVMs(ctx context.Context, resourceGroup string) ([
// A providerID for scale set VMs is build after the following schema:
// - 'azure:///subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.Compute/virtualMachineScaleSets/<scale-set-name>/virtualMachines/<instance-id>'
func splitScaleSetProviderID(providerID string) (subscriptionID, resourceGroup, scaleSet, instanceID string, err error) {
    // providerIDregex is a regex matching an azure scaleset vm providerID with each part of the URI being a submatch.
    providerIDregex := regexp.MustCompile(`^azure:///subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/Microsoft.Compute/virtualMachineScaleSets/([^/]+)/virtualMachines/([^/]+)$`)
    matches := providerIDregex.FindStringSubmatch(providerID)
    matches := azureVMSSProviderIDRegexp.FindStringSubmatch(providerID)
    if len(matches) != 5 {
        return "", "", "", "", errors.New("error splitting providerID")
    }
@ -74,12 +82,12 @@ func splitScaleSetProviderID(providerID string) (subscriptionID, resourceGroup,
}

// convertScaleSetVMToCoreInstance converts an azure scale set virtual machine with interface configurations into a core.Instance.
func convertScaleSetVMToCoreInstance(scaleSet string, vm armcompute.VirtualMachineScaleSetVM, interfaceIPConfigs []*armnetwork.InterfaceIPConfiguration) (core.Instance, error) {
func convertScaleSetVMToCoreInstance(scaleSet string, vm armcompute.VirtualMachineScaleSetVM, networkInterfaces []armnetwork.Interface, publicIPAddresses []string) (cloudtypes.Instance, error) {
    if vm.ID == nil {
        return core.Instance{}, errors.New("retrieving instance from armcompute API client returned no instance ID")
        return cloudtypes.Instance{}, errors.New("retrieving instance from armcompute API client returned no instance ID")
    }
    if vm.Properties == nil || vm.Properties.OSProfile == nil || vm.Properties.OSProfile.ComputerName == nil {
        return core.Instance{}, errors.New("retrieving instance from armcompute API client returned no computer name")
        return cloudtypes.Instance{}, errors.New("retrieving instance from armcompute API client returned no computer name")
    }
    var sshKeys map[string][]string
    if vm.Properties.OSProfile.LinuxConfiguration == nil || vm.Properties.OSProfile.LinuxConfiguration.SSH == nil {
@ -87,19 +95,18 @@ func convertScaleSetVMToCoreInstance(scaleSet string, vm armcompute.VirtualMachi
    } else {
        sshKeys = extractSSHKeys(*vm.Properties.OSProfile.LinuxConfiguration.SSH)
    }
    return core.Instance{
    return cloudtypes.Instance{
        Name: *vm.Properties.OSProfile.ComputerName,
        ProviderID: "azure://" + *vm.ID,
        Role: extractScaleSetVMRole(scaleSet),
        IPs: extractPrivateIPs(interfaceIPConfigs),
        PrivateIPs: extractPrivateIPs(networkInterfaces),
        PublicIPs: publicIPAddresses,
        SSHKeys: sshKeys,
    }, nil
}

// extractScaleSetVMRole extracts the constellation role of a scale set using its name.
func extractScaleSetVMRole(scaleSet string) role.Role {
    coordinatorScaleSetRegexp := regexp.MustCompile(`constellation-scale-set-coordinators-[0-9a-zA-Z]+$`)
    nodeScaleSetRegexp := regexp.MustCompile(`constellation-scale-set-nodes-[0-9a-zA-Z]+$`)
    if coordinatorScaleSetRegexp.MatchString(scaleSet) {
        return role.Coordinator
    }
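Hoisting the providerID pattern into the package-level azureVMSSProviderIDRegexp means the regular expression is compiled once instead of on every call. A short usage sketch of splitScaleSetProviderID with illustrative values; the wrapper function below exists only for this example:

func exampleSplitScaleSetProviderID() error {
    // Any ID that does not match the scale set schema yields "error splitting providerID".
    subscriptionID, resourceGroup, scaleSet, instanceID, err := splitScaleSetProviderID(
        "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id")
    if err != nil {
        return err
    }
    // subscriptionID == "subscription-id", resourceGroup == "resource-group",
    // scaleSet == "scale-set-name", instanceID == "instance-id"
    _, _, _, _ = subscriptionID, resourceGroup, scaleSet, instanceID
    return nil
}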
@ -8,17 +8,17 @@ import (
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork"
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGetScaleSetVM(t *testing.T) {
|
||||
wantInstance := core.Instance{
|
||||
wantInstance := cloudtypes.Instance{
|
||||
Name: "scale-set-name-instance-id",
|
||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
||||
IPs: []string{"192.0.2.0"},
|
||||
PrivateIPs: []string{"192.0.2.0"},
|
||||
SSHKeys: map[string][]string{"user": {"key-data"}},
|
||||
}
|
||||
testCases := map[string]struct {
|
||||
@ -26,7 +26,7 @@ func TestGetScaleSetVM(t *testing.T) {
|
||||
networkInterfacesAPI networkInterfacesAPI
|
||||
virtualMachineScaleSetVMsAPI virtualMachineScaleSetVMsAPI
|
||||
wantErr bool
|
||||
wantInstance core.Instance
|
||||
wantInstance cloudtypes.Instance
|
||||
}{
|
||||
"getVM for scale set instance works": {
|
||||
providerID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
||||
@ -79,11 +79,11 @@ func TestGetScaleSetVM(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestListScaleSetVMs(t *testing.T) {
|
||||
wantInstances := []core.Instance{
|
||||
wantInstances := []cloudtypes.Instance{
|
||||
{
|
||||
Name: "scale-set-name-instance-id",
|
||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
||||
IPs: []string{"192.0.2.0"},
|
||||
PrivateIPs: []string{"192.0.2.0"},
|
||||
SSHKeys: map[string][]string{"user": {"key-data"}},
|
||||
},
|
||||
}
|
||||
@ -93,7 +93,7 @@ func TestListScaleSetVMs(t *testing.T) {
|
||||
virtualMachineScaleSetVMsAPI virtualMachineScaleSetVMsAPI
|
||||
scaleSetsAPI scaleSetsAPI
|
||||
wantErr bool
|
||||
wantInstances []core.Instance
|
||||
wantInstances []cloudtypes.Instance
|
||||
}{
|
||||
"listVMs works": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
@ -114,7 +114,7 @@ func TestListScaleSetVMs(t *testing.T) {
|
||||
networkInterfacesAPI: newNetworkInterfacesStub(),
|
||||
virtualMachineScaleSetVMsAPI: &stubVirtualMachineScaleSetVMsAPI{},
|
||||
scaleSetsAPI: newScaleSetsStub(),
|
||||
wantInstances: []core.Instance{},
|
||||
wantInstances: []cloudtypes.Instance{},
|
||||
},
|
||||
"can skip nil in VM list": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
@ -210,10 +210,11 @@ func TestSplitScaleSetProviderID(t *testing.T) {
|
||||
|
||||
func TestConvertScaleSetVMToCoreInstance(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
inVM armcompute.VirtualMachineScaleSetVM
|
||||
inInterfaceIPConfigs []*armnetwork.InterfaceIPConfiguration
|
||||
wantErr bool
|
||||
wantInstance core.Instance
|
||||
inVM armcompute.VirtualMachineScaleSetVM
|
||||
inInterface []armnetwork.Interface
|
||||
inPublicIPs []string
|
||||
wantErr bool
|
||||
wantInstance cloudtypes.Instance
|
||||
}{
|
||||
"conversion works": {
|
||||
inVM: armcompute.VirtualMachineScaleSetVM{
|
||||
@ -226,17 +227,27 @@ func TestConvertScaleSetVMToCoreInstance(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
inInterfaceIPConfigs: []*armnetwork.InterfaceIPConfiguration{
|
||||
inInterface: []armnetwork.Interface{
|
||||
{
|
||||
Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
|
||||
PrivateIPAddress: to.StringPtr("192.0.2.0"),
|
||||
Name: to.StringPtr("scale-set-name_instance-id"),
|
||||
ID: to.StringPtr("/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Network/networkInterfaces/interface-name"),
|
||||
Properties: &armnetwork.InterfacePropertiesFormat{
|
||||
IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
|
||||
{
|
||||
Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
|
||||
PrivateIPAddress: to.StringPtr("192.0.2.0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantInstance: core.Instance{
|
||||
inPublicIPs: []string{"192.0.2.100", "192.0.2.101"},
|
||||
wantInstance: cloudtypes.Instance{
|
||||
Name: "scale-set-name-instance-id",
|
||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name/virtualMachines/instance-id",
|
||||
IPs: []string{"192.0.2.0"},
|
||||
PrivateIPs: []string{"192.0.2.0"},
|
||||
PublicIPs: []string{"192.0.2.100", "192.0.2.101"},
|
||||
SSHKeys: map[string][]string{},
|
||||
},
|
||||
},
|
||||
@ -251,7 +262,7 @@ func TestConvertScaleSetVMToCoreInstance(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
instance, err := convertScaleSetVMToCoreInstance("scale-set", tc.inVM, tc.inInterfaceIPConfigs)
|
||||
instance, err := convertScaleSetVMToCoreInstance("scale-set", tc.inVM, tc.inInterface, tc.inPublicIPs)
|
||||
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
|
19  coordinator/cloudprovider/azure/securityGroup.go  Normal file
@ -0,0 +1,19 @@
package azure

import (
    "context"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork"
)

// getNetworkSecurityGroup retrieves the list of security groups for the given resource group.
func (m *Metadata) getNetworkSecurityGroup(ctx context.Context, resourceGroup string) (*armnetwork.SecurityGroup, error) {
    pager := m.securityGroupsAPI.List(resourceGroup, nil)
    for pager.NextPage(ctx) {
        for _, securityGroup := range pager.PageResponse().Value {
            return securityGroup, nil
        }
    }
    return nil, fmt.Errorf("no security group found for resource group %q", resourceGroup)
}
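getNetworkSecurityGroup simply returns the first security group the pager yields, so it assumes the resource group contains exactly one relevant NSG. A hypothetical call site, just to show how the helper might be consumed; the function, resource group name, and the SecurityRules field access below are assumptions for illustration, not part of this commit:

// Hypothetical usage of the helper above.
func printSecurityRuleNames(ctx context.Context, m *Metadata) error {
    securityGroup, err := m.getNetworkSecurityGroup(ctx, "resource-group")
    if err != nil {
        return fmt.Errorf("getting network security group: %w", err)
    }
    if securityGroup.Properties == nil {
        return nil
    }
    for _, rule := range securityGroup.Properties.SecurityRules {
        if rule != nil && rule.Name != nil {
            fmt.Println(*rule.Name)
        }
    }
    return nil
}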
@ -11,29 +11,31 @@ import (
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
    "github.com/Azure/go-autorest/autorest/to"
    "github.com/edgelesssys/constellation/coordinator/cloudprovider"
    "github.com/edgelesssys/constellation/coordinator/core"
    "github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
)

var azureVMProviderIDRegexp = regexp.MustCompile(`^azure:///subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/Microsoft.Compute/virtualMachines/([^/]+)$`)

// getVM tries to get a single azure vm.
func (m *Metadata) getVM(ctx context.Context, providerID string) (core.Instance, error) {
func (m *Metadata) getVM(ctx context.Context, providerID string) (cloudtypes.Instance, error) {
    _, resourceGroup, instanceName, err := splitVMProviderID(providerID)
    if err != nil {
        return core.Instance{}, err
        return cloudtypes.Instance{}, err
    }
    vmResp, err := m.virtualMachinesAPI.Get(ctx, resourceGroup, instanceName, nil)
    if err != nil {
        return core.Instance{}, err
        return cloudtypes.Instance{}, err
    }
    interfaceIPConfigurations, err := m.getVMInterfaces(ctx, vmResp.VirtualMachine, resourceGroup)
    if err != nil {
        return core.Instance{}, err
        return cloudtypes.Instance{}, err
    }
    return convertVMToCoreInstance(vmResp.VirtualMachine, interfaceIPConfigurations)
}

// listVMs lists all individual VMs in the current resource group.
func (m *Metadata) listVMs(ctx context.Context, resourceGroup string) ([]core.Instance, error) {
    instances := []core.Instance{}
func (m *Metadata) listVMs(ctx context.Context, resourceGroup string) ([]cloudtypes.Instance, error) {
    instances := []cloudtypes.Instance{}
    pager := m.virtualMachinesAPI.List(resourceGroup, nil)
    for pager.NextPage(ctx) {
        for _, vm := range pager.PageResponse().Value {
@ -75,19 +77,17 @@ func (m *Metadata) setTag(ctx context.Context, key, value string) error {
// A providerID for individual VMs is build after the following schema:
// - 'azure:///subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.Compute/virtualMachines/<instance-name>'
func splitVMProviderID(providerID string) (subscriptionID, resourceGroup, instanceName string, err error) {
    // providerIDregex is a regex matching an azure vm providerID with each part of the URI being a submatch.
    providerIDregex := regexp.MustCompile(`^azure:///subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/Microsoft.Compute/virtualMachines/([^/]+)$`)
    matches := providerIDregex.FindStringSubmatch(providerID)
    matches := azureVMProviderIDRegexp.FindStringSubmatch(providerID)
    if len(matches) != 4 {
        return "", "", "", errors.New("error splitting providerID")
    }
    return matches[1], matches[2], matches[3], nil
}

// convertVMToCoreInstance converts an azure virtual machine with interface configurations into a core.Instance.
func convertVMToCoreInstance(vm armcompute.VirtualMachine, interfaceIPConfigs []*armnetwork.InterfaceIPConfiguration) (core.Instance, error) {
// convertVMToCoreInstance converts an azure virtual machine with interface configurations into a cloudtypes.Instance.
func convertVMToCoreInstance(vm armcompute.VirtualMachine, networkInterfaces []armnetwork.Interface) (cloudtypes.Instance, error) {
    if vm.Name == nil || vm.ID == nil {
        return core.Instance{}, fmt.Errorf("retrieving instance from armcompute API client returned invalid instance Name (%v) or ID (%v)", vm.Name, vm.ID)
        return cloudtypes.Instance{}, fmt.Errorf("retrieving instance from armcompute API client returned invalid instance Name (%v) or ID (%v)", vm.Name, vm.ID)
    }
    var sshKeys map[string][]string
    if vm.Properties == nil || vm.Properties.OSProfile == nil || vm.Properties.OSProfile.LinuxConfiguration == nil || vm.Properties.OSProfile.LinuxConfiguration.SSH == nil {
@ -96,11 +96,11 @@ func convertVMToCoreInstance(vm armcompute.VirtualMachine, interfaceIPConfigs []
        sshKeys = extractSSHKeys(*vm.Properties.OSProfile.LinuxConfiguration.SSH)
    }
    metadata := extractInstanceTags(vm.Tags)
    return core.Instance{
    return cloudtypes.Instance{
        Name: *vm.Name,
        ProviderID: "azure://" + *vm.ID,
        Role: cloudprovider.ExtractRole(metadata),
        IPs: extractPrivateIPs(interfaceIPConfigs),
        PrivateIPs: extractPrivateIPs(networkInterfaces),
        SSHKeys: sshKeys,
    }, nil
}
@ -8,16 +8,16 @@ import (
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork"
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGetVM(t *testing.T) {
|
||||
wantInstance := core.Instance{
|
||||
wantInstance := cloudtypes.Instance{
|
||||
Name: "instance-name",
|
||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name",
|
||||
IPs: []string{"192.0.2.0"},
|
||||
PrivateIPs: []string{"192.0.2.0"},
|
||||
SSHKeys: map[string][]string{"user": {"key-data"}},
|
||||
}
|
||||
testCases := map[string]struct {
|
||||
@ -25,7 +25,7 @@ func TestGetVM(t *testing.T) {
|
||||
networkInterfacesAPI networkInterfacesAPI
|
||||
virtualMachinesAPI virtualMachinesAPI
|
||||
wantErr bool
|
||||
wantInstance core.Instance
|
||||
wantInstance cloudtypes.Instance
|
||||
}{
|
||||
"getVM for individual instance works": {
|
||||
providerID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name",
|
||||
@ -78,11 +78,11 @@ func TestGetVM(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestListVMs(t *testing.T) {
|
||||
wantInstances := []core.Instance{
|
||||
wantInstances := []cloudtypes.Instance{
|
||||
{
|
||||
Name: "instance-name",
|
||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name",
|
||||
IPs: []string{"192.0.2.0"},
|
||||
PrivateIPs: []string{"192.0.2.0"},
|
||||
SSHKeys: map[string][]string{"user": {"key-data"}},
|
||||
},
|
||||
}
|
||||
@ -91,7 +91,7 @@ func TestListVMs(t *testing.T) {
|
||||
networkInterfacesAPI networkInterfacesAPI
|
||||
virtualMachinesAPI virtualMachinesAPI
|
||||
wantErr bool
|
||||
wantInstances []core.Instance
|
||||
wantInstances []cloudtypes.Instance
|
||||
}{
|
||||
"listVMs works": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
@ -103,7 +103,7 @@ func TestListVMs(t *testing.T) {
|
||||
imdsAPI: newIMDSStub(),
|
||||
networkInterfacesAPI: newNetworkInterfacesStub(),
|
||||
virtualMachinesAPI: &stubVirtualMachinesAPI{},
|
||||
wantInstances: []core.Instance{},
|
||||
wantInstances: []cloudtypes.Instance{},
|
||||
},
|
||||
"can skip nil in VM list": {
|
||||
imdsAPI: newIMDSStub(),
|
||||
@ -234,10 +234,10 @@ func TestSplitVMProviderID(t *testing.T) {
|
||||
|
||||
func TestConvertVMToCoreInstance(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
inVM armcompute.VirtualMachine
|
||||
inInterfaceIPConfigs []*armnetwork.InterfaceIPConfiguration
|
||||
wantErr bool
|
||||
wantInstance core.Instance
|
||||
inVM armcompute.VirtualMachine
|
||||
inInterface []armnetwork.Interface
|
||||
wantErr bool
|
||||
wantInstance cloudtypes.Instance
|
||||
}{
|
||||
"conversion works": {
|
||||
inVM: armcompute.VirtualMachine{
|
||||
@ -259,17 +259,25 @@ func TestConvertVMToCoreInstance(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
inInterfaceIPConfigs: []*armnetwork.InterfaceIPConfiguration{
|
||||
inInterface: []armnetwork.Interface{
|
||||
{
|
||||
Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
|
||||
PrivateIPAddress: to.StringPtr("192.0.2.0"),
|
||||
Name: to.StringPtr("interface-name"),
|
||||
ID: to.StringPtr("/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Network/networkInterfaces/interface-name"),
|
||||
Properties: &armnetwork.InterfacePropertiesFormat{
|
||||
IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
|
||||
{
|
||||
Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
|
||||
PrivateIPAddress: to.StringPtr("192.0.2.0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantInstance: core.Instance{
|
||||
wantInstance: cloudtypes.Instance{
|
||||
Name: "instance-name",
|
||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name",
|
||||
IPs: []string{"192.0.2.0"},
|
||||
PrivateIPs: []string{"192.0.2.0"},
|
||||
SSHKeys: map[string][]string{"user": {"key-data"}},
|
||||
},
|
||||
},
|
||||
@ -279,17 +287,25 @@ func TestConvertVMToCoreInstance(t *testing.T) {
|
||||
ID: to.StringPtr("/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name"),
|
||||
Tags: map[string]*string{"tag-key": to.StringPtr("tag-value")},
|
||||
},
|
||||
inInterfaceIPConfigs: []*armnetwork.InterfaceIPConfiguration{
|
||||
inInterface: []armnetwork.Interface{
|
||||
{
|
||||
Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
|
||||
PrivateIPAddress: to.StringPtr("192.0.2.0"),
|
||||
Name: to.StringPtr("interface-name"),
|
||||
ID: to.StringPtr("/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Network/networkInterfaces/interface-name"),
|
||||
Properties: &armnetwork.InterfacePropertiesFormat{
|
||||
IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
|
||||
{
|
||||
Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
|
||||
PrivateIPAddress: to.StringPtr("192.0.2.0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantInstance: core.Instance{
|
||||
wantInstance: cloudtypes.Instance{
|
||||
Name: "instance-name",
|
||||
ProviderID: "azure:///subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachines/instance-name",
|
||||
IPs: []string{"192.0.2.0"},
|
||||
PrivateIPs: []string{"192.0.2.0"},
|
||||
SSHKeys: map[string][]string{},
|
||||
},
|
||||
},
|
||||
@ -304,7 +320,7 @@ func TestConvertVMToCoreInstance(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
instance, err := convertVMToCoreInstance(tc.inVM, tc.inInterfaceIPConfigs)
|
||||
instance, err := convertVMToCoreInstance(tc.inVM, tc.inInterface)
|
||||
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
|
21  coordinator/cloudprovider/azure/virtualnetwork.go  Normal file
@ -0,0 +1,21 @@
package azure

import (
    "context"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork"
)

// getVirtualNetwork return the first virtual network found in the resource group.
func (m *Metadata) getVirtualNetwork(ctx context.Context, resourceGroup string) (*armnetwork.VirtualNetwork, error) {
    pager := m.virtualNetworksAPI.List(resourceGroup, nil)
    for pager.NextPage(ctx) {
        for _, network := range pager.PageResponse().Value {
            if network != nil {
                return network, nil
            }
        }
    }
    return nil, fmt.Errorf("no virtual network found in resource group %s", resourceGroup)
}
@ -8,6 +8,22 @@ import (
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
)

type virtualNetworksClient struct {
    *armnetwork.VirtualNetworksClient
}

func (c *virtualNetworksClient) List(resourceGroupName string, options *armnetwork.VirtualNetworksClientListOptions) virtualNetworksClientListPager {
    return c.VirtualNetworksClient.List(resourceGroupName, options)
}

type securityGroupsClient struct {
    *armnetwork.SecurityGroupsClient
}

func (c *securityGroupsClient) List(resourceGroupName string, options *armnetwork.SecurityGroupsClientListOptions) securityGroupsClientListPager {
    return c.SecurityGroupsClient.List(resourceGroupName, options)
}

type networkInterfacesClient struct {
    *armnetwork.InterfacesClient
}
@ -25,6 +41,32 @@ func (c *networkInterfacesClient) Get(ctx context.Context, resourceGroupName str
    return c.InterfacesClient.Get(ctx, resourceGroupName, networkInterfaceName, options)
}

type publicIPAddressesClient struct {
    *armnetwork.PublicIPAddressesClient
}

func (c *publicIPAddressesClient) GetVirtualMachineScaleSetPublicIPAddress(ctx context.Context, resourceGroupName string,
    virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string,
    ipConfigurationName string, publicIPAddressName string,
    options *armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressOptions,
) (armnetwork.PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResponse, error) {
    return c.PublicIPAddressesClient.GetVirtualMachineScaleSetPublicIPAddress(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, ipConfigurationName, publicIPAddressName, options)
}

func (c *publicIPAddressesClient) Get(ctx context.Context, resourceGroupName string, publicIPAddressName string,
    options *armnetwork.PublicIPAddressesClientGetOptions,
) (armnetwork.PublicIPAddressesClientGetResponse, error) {
    return c.PublicIPAddressesClient.Get(ctx, resourceGroupName, publicIPAddressName, options)
}

type loadBalancersClient struct {
    *armnetwork.LoadBalancersClient
}

func (c *loadBalancersClient) List(resourceGroupName string, options *armnetwork.LoadBalancersClientListOptions) loadBalancersClientListPager {
    return c.LoadBalancersClient.List(resourceGroupName, options)
}

type virtualMachinesClient struct {
    *armcompute.VirtualMachinesClient
}
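These one-method wrappers exist so the metadata code depends on small hand-written interfaces and pager types (virtualNetworksClientListPager, securityGroupsClientListPager, loadBalancersClientListPager) rather than on the concrete ARM SDK clients, which keeps the callers stubbable in unit tests. A generic illustration of the pattern with invented names, not code from this commit:

// Invented example; shows why call sites accept an interface instead of the concrete SDK client.
type lister interface {
    List(resourceGroup string) []string
}

type sdkBackedLister struct{ names []string } // stands in for a wrapper around the real SDK client

func (l sdkBackedLister) List(resourceGroup string) []string { return l.names }

type stubLister struct{ names []string } // what a unit test would inject instead

func (l stubLister) List(resourceGroup string) []string { return l.names }

func firstName(api lister, resourceGroup string) (string, bool) {
    names := api.List(resourceGroup)
    if len(names) == 0 {
        return "", false
    }
    return names[0], true
}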
15  coordinator/cloudprovider/cloudtypes/instance.go  Normal file
@ -0,0 +1,15 @@
package cloudtypes

import "github.com/edgelesssys/constellation/coordinator/role"

// Instance describes metadata of a peer.
type Instance struct {
    Name string
    ProviderID string
    Role role.Role
    PrivateIPs []string
    PublicIPs []string
    AliasIPRanges []string
    // SSHKeys maps usernames to ssh public keys.
    SSHKeys map[string][]string
}
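cloudtypes.Instance replaces the single IP list used before with separate private, public, and alias-range slices (the alias ranges are filled from GCP alias IP ranges further down in this diff). A literal with placeholder values, just to show the shape:

// Placeholder values for illustration only.
var exampleInstance = Instance{
    Name:          "some-instance",
    ProviderID:    "gce://some-project/some-zone/some-instance",
    Role:          role.Coordinator,
    PrivateIPs:    []string{"192.0.2.0"},
    PublicIPs:     []string{"192.0.2.1"},
    AliasIPRanges: []string{"192.0.2.0/24"},
    SSHKeys:       map[string][]string{"user": {"ssh-rsa key-data"}},
}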
@ -16,6 +16,12 @@ type instanceAPI interface {
    Close() error
}

type subnetworkAPI interface {
    List(ctx context.Context, req *computepb.ListSubnetworksRequest, opts ...gax.CallOption) SubnetworkIterator
    Get(ctx context.Context, req *computepb.GetSubnetworkRequest, opts ...gax.CallOption) (*computepb.Subnetwork, error)
    Close() error
}

type metadataAPI interface {
    InstanceAttributeValue(attr string) (string, error)
    ProjectID() (string, error)
@ -30,3 +36,7 @@ type Operation interface {
type InstanceIterator interface {
    Next() (*computepb.Instance, error)
}

type SubnetworkIterator interface {
    Next() (*computepb.Subnetwork, error)
}
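A test double for the new subnetworkAPI could look like the following; the stub name and fields are invented for illustration (the repository's own stubs live in the package's test files), and the computepb and gax imports are assumed from the surrounding package:

// Invented stub; shapes follow the subnetworkAPI and SubnetworkIterator interfaces above.
type stubSubnetworkAPI struct {
    subnetwork *computepb.Subnetwork
    getErr     error
    iter       SubnetworkIterator
}

func (s stubSubnetworkAPI) List(ctx context.Context, req *computepb.ListSubnetworksRequest, opts ...gax.CallOption) SubnetworkIterator {
    return s.iter
}

func (s stubSubnetworkAPI) Get(ctx context.Context, req *computepb.GetSubnetworkRequest, opts ...gax.CallOption) (*computepb.Subnetwork, error) {
    return s.subnetwork, s.getErr
}

func (s stubSubnetworkAPI) Close() error { return nil }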
@ -1,7 +1,7 @@
package gcp

import (
    "github.com/edgelesssys/constellation/coordinator/core"
    "github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
    "github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
    k8s "k8s.io/api/core/v1"
)
@ -15,7 +15,7 @@ func (a *Autoscaler) Name() string {
}

// Secrets returns a list of secrets to deploy together with the k8s cluster-autoscaler.
func (a *Autoscaler) Secrets(instance core.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
func (a *Autoscaler) Secrets(instance cloudtypes.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
    return resources.Secrets{}, nil
}

@ -3,7 +3,7 @@ package gcp
import (
    "testing"

    "github.com/edgelesssys/constellation/coordinator/core"
    "github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
    "github.com/stretchr/testify/assert"
)

@ -12,7 +12,7 @@ func TestTrivialAutoscalerFunctions(t *testing.T) {
    autoscaler := Autoscaler{}

    assert.NotEmpty(autoscaler.Name())
    assert.Empty(autoscaler.Secrets(core.Instance{}, ""))
    assert.Empty(autoscaler.Secrets(cloudtypes.Instance{}, ""))
    assert.NotEmpty(autoscaler.Volumes())
    assert.NotEmpty(autoscaler.VolumeMounts())
    assert.NotEmpty(autoscaler.Env())
@ -1,12 +1,13 @@
package gcp

import (
    "context"
    "encoding/json"
    "fmt"
    "strings"

    "github.com/edgelesssys/constellation/coordinator/cloudprovider"
    "github.com/edgelesssys/constellation/coordinator/core"
    "github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
    "github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
    k8s "k8s.io/api/core/v1"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -34,14 +35,17 @@ func (c *CloudControllerManager) Name() string {
func (c *CloudControllerManager) ExtraArgs() []string {
    return []string{
        "--use-service-account-credentials",
        "--controllers=cloud-node,cloud-node-lifecycle",
        "--controllers=cloud-node,cloud-node-lifecycle,nodeipam,service,route",
        "--cloud-config=/etc/gce/gce.conf",
        "--cidr-allocator-type=CloudAllocator",
        "--allocate-node-cidrs=true",
        "--configure-cloud-routes=false",
    }
}

// ConfigMaps returns a list of ConfigMaps to deploy together with the k8s cloud-controller-manager
// Reference: https://kubernetes.io/docs/concepts/configuration/configmap/ .
func (c *CloudControllerManager) ConfigMaps(instance core.Instance) (resources.ConfigMaps, error) {
func (c *CloudControllerManager) ConfigMaps(instance cloudtypes.Instance) (resources.ConfigMaps, error) {
    // GCP CCM expects cloud config to contain the GCP project-id and other configuration.
    // reference: https://github.com/kubernetes/cloud-provider-gcp/blob/master/cluster/gce/gci/configure-helper.sh#L791-L892
    var config strings.Builder
@ -51,7 +55,10 @@ func (c *CloudControllerManager) ConfigMaps(instance core.Instance) (resources.C
        return resources.ConfigMaps{}, err
    }
    config.WriteString(fmt.Sprintf("project-id = %s\n", projectID))
    config.WriteString("use-metadata-server = false\n")
    config.WriteString("use-metadata-server = true\n")

    nameParts := strings.Split(instance.Name, "-")
    config.WriteString("node-tags = constellation-" + nameParts[len(nameParts)-2] + "\n")

    return resources.ConfigMaps{
        &k8s.ConfigMap{
@ -72,7 +79,7 @@ func (c *CloudControllerManager) ConfigMaps(instance core.Instance) (resources.C

// Secrets returns a list of secrets to deploy together with the k8s cloud-controller-manager.
// Reference: https://kubernetes.io/docs/concepts/configuration/secret/ .
func (c *CloudControllerManager) Secrets(instance core.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
func (c *CloudControllerManager) Secrets(ctx context.Context, instance cloudtypes.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
    serviceAccountKey, err := getServiceAccountKey(cloudServiceAccountURI)
    if err != nil {
        return resources.Secrets{}, err
@ -150,12 +157,6 @@ func (c *CloudControllerManager) Env() []k8s.EnvVar {
    }
}

// PrepareInstance is called on every instance before deploying the cloud-controller-manager.
// Allows for cloud-provider specific hooks.
func (c *CloudControllerManager) PrepareInstance(instance core.Instance, vpnIP string) error {
    return nil
}

// Supported is used to determine if cloud controller manager is implemented for this cloud provider.
func (c *CloudControllerManager) Supported() bool {
    return true
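Taken together with the test fixture further down in this diff, the ConfigMap rendered for an instance named instanceName-UID-0 now carries a gce.conf like the one below: node-tags is derived from the second-to-last dash-separated part of the instance name (the UID), and use-metadata-server is switched from false to true.

[global]
project-id = project-id
use-metadata-server = true
node-tags = constellation-UID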
@ -1,11 +1,12 @@
|
||||
package gcp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/edgelesssys/constellation/cli/gcp/client"
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@ -15,12 +16,12 @@ import (
|
||||
|
||||
func TestConfigMaps(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
instance core.Instance
|
||||
instance cloudtypes.Instance
|
||||
wantConfigMaps resources.ConfigMaps
|
||||
wantErr bool
|
||||
}{
|
||||
"ConfigMaps works": {
|
||||
instance: core.Instance{ProviderID: "gce://project-id/zone/instance-name"},
|
||||
instance: cloudtypes.Instance{ProviderID: "gce://project-id/zone/instanceName-UID-0", Name: "instanceName-UID-0"},
|
||||
wantConfigMaps: resources.ConfigMaps{
|
||||
&k8s.ConfigMap{
|
||||
TypeMeta: v1.TypeMeta{
|
||||
@ -34,14 +35,15 @@ func TestConfigMaps(t *testing.T) {
|
||||
Data: map[string]string{
|
||||
"gce.conf": `[global]
|
||||
project-id = project-id
|
||||
use-metadata-server = false
|
||||
use-metadata-server = true
|
||||
node-tags = constellation-UID
|
||||
`,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"invalid providerID fails": {
|
||||
instance: core.Instance{ProviderID: "invalid"},
|
||||
instance: cloudtypes.Instance{ProviderID: "invalid"},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
@ -80,7 +82,7 @@ func TestSecrets(t *testing.T) {
|
||||
rawKey, err := json.Marshal(serviceAccountKey)
|
||||
require.NoError(t, err)
|
||||
testCases := map[string]struct {
|
||||
instance core.Instance
|
||||
instance cloudtypes.Instance
|
||||
cloudServiceAccountURI string
|
||||
wantSecrets resources.Secrets
|
||||
wantErr bool
|
||||
@ -115,7 +117,7 @@ func TestSecrets(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
cloud := CloudControllerManager{}
|
||||
secrets, err := cloud.Secrets(tc.instance, tc.cloudServiceAccountURI)
|
||||
secrets, err := cloud.Secrets(context.Background(), tc.instance, tc.cloudServiceAccountURI)
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
@ -137,6 +139,5 @@ func TestTrivialCCMFunctions(t *testing.T) {
|
||||
assert.NotEmpty(cloud.Volumes())
|
||||
assert.NotEmpty(cloud.VolumeMounts())
|
||||
assert.NotEmpty(cloud.Env())
|
||||
assert.NoError(cloud.PrepareInstance(core.Instance{}, "192.0.2.0"))
|
||||
assert.True(cloud.Supported())
|
||||
}
|
||||
|
@ -8,6 +8,7 @@ import (

    compute "cloud.google.com/go/compute/apiv1"
    "github.com/edgelesssys/constellation/coordinator/cloudprovider"
    "github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
    "github.com/edgelesssys/constellation/coordinator/core"
    "google.golang.org/api/iterator"
    computepb "google.golang.org/genproto/googleapis/cloud/compute/v1"
@ -16,9 +17,12 @@ import (

const gcpSSHMetadataKey = "ssh-keys"

var providerIDRegex = regexp.MustCompile(`^gce://([^/]+)/([^/]+)/([^/]+)$`)

// Client implements the gcp.API interface.
type Client struct {
    instanceAPI
    subnetworkAPI
    metadataAPI
}

@ -28,11 +32,15 @@ func NewClient(ctx context.Context) (*Client, error) {
    if err != nil {
        return nil, err
    }
    return &Client{instanceAPI: &instanceClient{insAPI}, metadataAPI: &metadataClient{}}, nil
    subnetAPI, err := compute.NewSubnetworksRESTClient(ctx)
    if err != nil {
        return nil, err
    }
    return &Client{instanceAPI: &instanceClient{insAPI}, subnetworkAPI: &subnetworkClient{subnetAPI}, metadataAPI: &metadataClient{}}, nil
}

// RetrieveInstances returns list of instances including their ips and metadata.
func (c *Client) RetrieveInstances(ctx context.Context, project, zone string) ([]core.Instance, error) {
func (c *Client) RetrieveInstances(ctx context.Context, project, zone string) ([]cloudtypes.Instance, error) {
    uid, err := c.uid()
    if err != nil {
        return nil, err
@ -43,7 +51,7 @@ func (c *Client) RetrieveInstances(ctx context.Context, project, zone string) ([
    }
    instanceIterator := c.instanceAPI.List(ctx, req)

    instances := []core.Instance{}
    instances := []cloudtypes.Instance{}
    for {
        resp, err := instanceIterator.Next()
        if err == iterator.Done {
@ -68,10 +76,10 @@ func (c *Client) RetrieveInstances(ctx context.Context, project, zone string) ([
}

// RetrieveInstance returns a an instance including ips and metadata.
func (c *Client) RetrieveInstance(ctx context.Context, project, zone, instanceName string) (core.Instance, error) {
func (c *Client) RetrieveInstance(ctx context.Context, project, zone, instanceName string) (cloudtypes.Instance, error) {
    instance, err := c.getComputeInstance(ctx, project, zone, instanceName)
    if err != nil {
        return core.Instance{}, err
        return cloudtypes.Instance{}, err
    }

    return convertToCoreInstance(instance, project, zone)
@ -156,8 +164,45 @@ func (c *Client) UnsetInstanceMetadata(ctx context.Context, project, zone, insta
    return nil
}

// RetrieveSubnetworkAliasCIDR returns the alias CIDR of the subnetwork specified by project, zone and subnetworkName.
func (c *Client) RetrieveSubnetworkAliasCIDR(ctx context.Context, project, zone, instanceName string) (string, error) {
    instance, err := c.getComputeInstance(ctx, project, zone, instanceName)
    if err != nil {
        return "", err
    }
    if instance == nil || instance.NetworkInterfaces == nil || len(instance.NetworkInterfaces) == 0 || instance.NetworkInterfaces[0].Subnetwork == nil {
        return "", fmt.Errorf("retrieving instance network interfaces failed")
    }
    subnetworkURL := *instance.NetworkInterfaces[0].Subnetwork
    subnetworkURLFragments := strings.Split(subnetworkURL, "/")
    subnetworkName := subnetworkURLFragments[len(subnetworkURLFragments)-1]

    // convert:
    // zone --> region
    // europe-west3-b --> europe-west3
    regionParts := strings.Split(zone, "-")
    region := strings.TrimSuffix(zone, "-"+regionParts[len(regionParts)-1])

    req := &computepb.GetSubnetworkRequest{
        Project: project,
        Region: region,
        Subnetwork: subnetworkName,
    }
    subnetwork, err := c.subnetworkAPI.Get(ctx, req)
    if err != nil {
        return "", fmt.Errorf("retrieving subnetwork alias CIDR failed: %w", err)
    }
    if subnetwork == nil || subnetwork.IpCidrRange == nil || *subnetwork.IpCidrRange == "" {
        return "", fmt.Errorf("retrieving subnetwork alias CIDR returned invalid results")
    }
    return *subnetwork.IpCidrRange, nil
}

// Close closes the instanceAPI client.
func (c *Client) Close() error {
    if err := c.subnetworkAPI.Close(); err != nil {
        return err
    }
    return c.instanceAPI.Close()
}

@ -199,8 +244,8 @@ func (c *Client) uid() (string, error) {
    return uid, nil
}

// extractIPs extracts private interface IPs from a list of interfaces.
func extractIPs(interfaces []*computepb.NetworkInterface) []string {
// extractPrivateIPs extracts private interface IPs from a list of interfaces.
func extractPrivateIPs(interfaces []*computepb.NetworkInterface) []string {
    ips := []string{}
    for _, interf := range interfaces {
        if interf == nil || interf.NetworkIP == nil {
@ -211,6 +256,40 @@ func extractIPs(interfaces []*computepb.NetworkInterface) []string {
    return ips
}

// extractPublicIPs extracts public interface IPs from a list of interfaces.
func extractPublicIPs(interfaces []*computepb.NetworkInterface) []string {
    ips := []string{}
    for _, interf := range interfaces {
        if interf == nil || interf.AccessConfigs == nil {
            continue
        }
        for _, accessConfig := range interf.AccessConfigs {
            if accessConfig == nil || accessConfig.NatIP == nil {
                continue
            }
            ips = append(ips, *accessConfig.NatIP)
        }
    }
    return ips
}

// extractAliasIPRanges extracts alias interface IPs from a list of interfaces.
func extractAliasIPRanges(interfaces []*computepb.NetworkInterface) []string {
    ips := []string{}
    for _, interf := range interfaces {
        if interf == nil || interf.AliasIpRanges == nil {
            continue
        }
        for _, aliasIP := range interf.AliasIpRanges {
            if aliasIP == nil || aliasIP.IpCidrRange == nil {
                continue
            }
            ips = append(ips, *aliasIP.IpCidrRange)
        }
    }
    return ips
}

// extractSSHKeys extracts SSH keys from GCP instance metadata.
// reference: https://cloud.google.com/compute/docs/connect/add-ssh-keys .
func extractSSHKeys(metadata map[string]string) map[string][]string {
@ -239,17 +318,19 @@ func extractSSHKeys(metadata map[string]string) map[string][]string {
}

// convertToCoreInstance converts a *computepb.Instance to a core.Instance.
func convertToCoreInstance(in *computepb.Instance, project string, zone string) (core.Instance, error) {
func convertToCoreInstance(in *computepb.Instance, project string, zone string) (cloudtypes.Instance, error) {
    if in.Name == nil {
        return core.Instance{}, fmt.Errorf("retrieving instance from compute API client returned invalid instance Name: %v", in.Name)
        return cloudtypes.Instance{}, fmt.Errorf("retrieving instance from compute API client returned invalid instance Name: %v", in.Name)
    }
    metadata := extractInstanceMetadata(in.Metadata, "", false)
    return core.Instance{
        Name: *in.Name,
        ProviderID: joinProviderID(project, zone, *in.Name),
        Role: cloudprovider.ExtractRole(metadata),
        IPs: extractIPs(in.NetworkInterfaces),
        SSHKeys: extractSSHKeys(metadata),
    return cloudtypes.Instance{
        Name: *in.Name,
        ProviderID: joinProviderID(project, zone, *in.Name),
        Role: cloudprovider.ExtractRole(metadata),
        PrivateIPs: extractPrivateIPs(in.NetworkInterfaces),
        PublicIPs: extractPublicIPs(in.NetworkInterfaces),
        AliasIPRanges: extractAliasIPRanges(in.NetworkInterfaces),
        SSHKeys: extractSSHKeys(metadata),
    }, nil
}

@ -262,9 +343,7 @@ func joinProviderID(project, zone, instanceName string) string {
// splitProviderID splits a provider's id into core components.
// A providerID is build after the schema 'gce://<project-id>/<zone>/<instance-name>'
func splitProviderID(providerID string) (project, zone, instance string, err error) {
    // providerIDregex is a regex matching a gce providerID with each part of the URI being a submatch.
    providerIDregex := regexp.MustCompile(`^gce://([^/]+)/([^/]+)/([^/]+)$`)
    matches := providerIDregex.FindStringSubmatch(providerID)
    matches := providerIDRegex.FindStringSubmatch(providerID)
    if len(matches) != 4 {
        return "", "", "", fmt.Errorf("error splitting providerID: %v", providerID)
    }
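The zone-to-region conversion inside RetrieveSubnetworkAliasCIDR just drops the final zone suffix. A small standalone restatement of that step:

package main

import (
    "fmt"
    "strings"
)

// regionFromZone mirrors the zone --> region conversion used above.
func regionFromZone(zone string) string {
    parts := strings.Split(zone, "-")
    return strings.TrimSuffix(zone, "-"+parts[len(parts)-1])
}

func main() {
    fmt.Println(regionFromZone("europe-west3-b")) // prints: europe-west3
}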
@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
compute "cloud.google.com/go/compute/apiv1"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
@ -53,7 +54,11 @@ func TestRetrieveInstances(t *testing.T) {
|
||||
},
|
||||
},
|
||||
NetworkInterfaces: []*computepb.NetworkInterface{
|
||||
{NetworkIP: proto.String("192.0.2.0")},
|
||||
{
|
||||
NetworkIP: proto.String("192.0.2.0"),
|
||||
AliasIpRanges: []*computepb.AliasIpRange{{IpCidrRange: proto.String("192.0.2.0/16")}},
|
||||
AccessConfigs: []*computepb.AccessConfig{{NatIP: proto.String("192.0.2.1")}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -65,20 +70,22 @@ func TestRetrieveInstances(t *testing.T) {
|
||||
metadata stubMetadataClient
|
||||
instanceIter *stubInstanceIterator
|
||||
instanceIterMutator func(*stubInstanceIterator)
|
||||
wantInstances []core.Instance
|
||||
wantInstances []cloudtypes.Instance
|
||||
wantErr bool
|
||||
}{
|
||||
"retrieve works": {
|
||||
client: stubInstancesClient{},
|
||||
metadata: stubMetadataClient{InstanceValue: uid},
|
||||
instanceIter: newTestIter(),
|
||||
wantInstances: []core.Instance{
|
||||
wantInstances: []cloudtypes.Instance{
|
||||
{
|
||||
Name: "someInstance",
|
||||
ProviderID: "gce://someProject/someZone/someInstance",
|
||||
Role: role.Coordinator,
|
||||
IPs: []string{"192.0.2.0"},
|
||||
SSHKeys: map[string][]string{"bob": {"ssh-rsa bobskey"}},
|
||||
Name: "someInstance",
|
||||
ProviderID: "gce://someProject/someZone/someInstance",
|
||||
Role: role.Coordinator,
|
||||
AliasIPRanges: []string{"192.0.2.0/16"},
|
||||
PublicIPs: []string{"192.0.2.1"},
|
||||
PrivateIPs: []string{"192.0.2.0"},
|
||||
SSHKeys: map[string][]string{"bob": {"ssh-rsa bobskey"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -94,13 +101,15 @@ func TestRetrieveInstances(t *testing.T) {
|
||||
metadata: stubMetadataClient{InstanceValue: uid},
|
||||
instanceIter: newTestIter(),
|
||||
instanceIterMutator: func(sii *stubInstanceIterator) { sii.instances[0].NetworkInterfaces = nil },
|
||||
wantInstances: []core.Instance{
|
||||
wantInstances: []cloudtypes.Instance{
|
||||
{
|
||||
Name: "someInstance",
|
||||
ProviderID: "gce://someProject/someZone/someInstance",
|
||||
Role: role.Coordinator,
|
||||
IPs: []string{},
|
||||
SSHKeys: map[string][]string{"bob": {"ssh-rsa bobskey"}},
|
||||
Name: "someInstance",
|
||||
ProviderID: "gce://someProject/someZone/someInstance",
|
||||
Role: role.Coordinator,
|
||||
AliasIPRanges: []string{},
|
||||
PublicIPs: []string{},
|
||||
PrivateIPs: []string{},
|
||||
SSHKeys: map[string][]string{"bob": {"ssh-rsa bobskey"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -109,13 +118,15 @@ func TestRetrieveInstances(t *testing.T) {
|
||||
metadata: stubMetadataClient{InstanceValue: uid},
|
||||
instanceIter: newTestIter(),
|
||||
instanceIterMutator: func(sii *stubInstanceIterator) { sii.instances[0].NetworkInterfaces[0].NetworkIP = nil },
|
||||
wantInstances: []core.Instance{
|
||||
wantInstances: []cloudtypes.Instance{
|
||||
{
|
||||
Name: "someInstance",
|
||||
ProviderID: "gce://someProject/someZone/someInstance",
|
||||
Role: role.Coordinator,
|
||||
IPs: []string{},
|
||||
SSHKeys: map[string][]string{"bob": {"ssh-rsa bobskey"}},
|
||||
Name: "someInstance",
|
||||
ProviderID: "gce://someProject/someZone/someInstance",
|
||||
Role: role.Coordinator,
|
||||
AliasIPRanges: []string{"192.0.2.0/16"},
|
||||
PublicIPs: []string{"192.0.2.1"},
|
||||
PrivateIPs: []string{},
|
||||
SSHKeys: map[string][]string{"bob": {"ssh-rsa bobskey"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -124,7 +135,7 @@ func TestRetrieveInstances(t *testing.T) {
|
||||
metadata: stubMetadataClient{InstanceValue: uid},
|
||||
instanceIter: newTestIter(),
|
||||
instanceIterMutator: func(sii *stubInstanceIterator) { sii.instances[0].Metadata.Items[2].Key = proto.String("") },
|
||||
wantInstances: []core.Instance{},
|
||||
wantInstances: []cloudtypes.Instance{},
|
||||
},
|
||||
"constellation retrieval fails": {
|
||||
client: stubInstancesClient{},
|
||||
@ -137,13 +148,15 @@ func TestRetrieveInstances(t *testing.T) {
|
||||
metadata: stubMetadataClient{InstanceValue: uid},
|
||||
instanceIter: newTestIter(),
|
||||
instanceIterMutator: func(sii *stubInstanceIterator) { sii.instances[0].Metadata.Items[3].Key = proto.String("") },
|
||||
wantInstances: []core.Instance{
|
||||
wantInstances: []cloudtypes.Instance{
|
||||
{
|
||||
Name: "someInstance",
|
||||
ProviderID: "gce://someProject/someZone/someInstance",
|
||||
Role: role.Unknown,
|
||||
IPs: []string{"192.0.2.0"},
|
||||
SSHKeys: map[string][]string{"bob": {"ssh-rsa bobskey"}},
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
Role: role.Unknown,
AliasIPRanges: []string{"192.0.2.0/16"},
PublicIPs: []string{"192.0.2.1"},
PrivateIPs: []string{"192.0.2.0"},
SSHKeys: map[string][]string{"bob": {"ssh-rsa bobskey"}},
},
},
},
@ -198,7 +211,11 @@ func TestRetrieveInstance(t *testing.T) {
},
},
NetworkInterfaces: []*computepb.NetworkInterface{
{NetworkIP: proto.String("192.0.2.0")},
{
NetworkIP: proto.String("192.0.2.0"),
AliasIpRanges: []*computepb.AliasIpRange{{IpCidrRange: proto.String("192.0.2.0/16")}},
AccessConfigs: []*computepb.AccessConfig{{NatIP: proto.String("192.0.2.1")}},
},
},
}
}
@ -207,17 +224,19 @@ func TestRetrieveInstance(t *testing.T) {
client stubInstancesClient
clientInstance *computepb.Instance
clientInstanceMutator func(*computepb.Instance)
wantInstance core.Instance
wantInstance cloudtypes.Instance
wantErr bool
}{
"retrieve works": {
client: stubInstancesClient{},
clientInstance: newTestInstance(),
wantInstance: core.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
IPs: []string{"192.0.2.0"},
SSHKeys: map[string][]string{},
wantInstance: cloudtypes.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
AliasIPRanges: []string{"192.0.2.0/16"},
PublicIPs: []string{"192.0.2.1"},
PrivateIPs: []string{"192.0.2.0"},
SSHKeys: map[string][]string{},
},
},
"retrieve with SSH key works": {
@ -227,11 +246,13 @@ func TestRetrieveInstance(t *testing.T) {
i.Metadata.Items[0].Key = proto.String("ssh-keys")
i.Metadata.Items[0].Value = proto.String("bob:ssh-rsa bobskey")
},
wantInstance: core.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
IPs: []string{"192.0.2.0"},
SSHKeys: map[string][]string{"bob": {"ssh-rsa bobskey"}},
wantInstance: cloudtypes.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
AliasIPRanges: []string{"192.0.2.0/16"},
PublicIPs: []string{"192.0.2.1"},
PrivateIPs: []string{"192.0.2.0"},
SSHKeys: map[string][]string{"bob": {"ssh-rsa bobskey"}},
},
},
"retrieve with Role works": {
@ -241,12 +262,14 @@ func TestRetrieveInstance(t *testing.T) {
i.Metadata.Items[0].Key = proto.String(core.RoleMetadataKey)
i.Metadata.Items[0].Value = proto.String(role.Coordinator.String())
},
wantInstance: core.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
Role: role.Coordinator,
IPs: []string{"192.0.2.0"},
SSHKeys: map[string][]string{},
wantInstance: cloudtypes.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
AliasIPRanges: []string{"192.0.2.0/16"},
PublicIPs: []string{"192.0.2.1"},
Role: role.Coordinator,
PrivateIPs: []string{"192.0.2.0"},
SSHKeys: map[string][]string{},
},
},
"retrieve fails": {
@ -260,55 +283,91 @@ func TestRetrieveInstance(t *testing.T) {
client: stubInstancesClient{},
clientInstance: newTestInstance(),
clientInstanceMutator: func(i *computepb.Instance) { i.Metadata.Items[0] = nil },
wantInstance: core.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
IPs: []string{"192.0.2.0"},
SSHKeys: map[string][]string{},
wantInstance: cloudtypes.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
AliasIPRanges: []string{"192.0.2.0/16"},
PublicIPs: []string{"192.0.2.1"},
PrivateIPs: []string{"192.0.2.0"},
SSHKeys: map[string][]string{},
},
},
"metadata key is null": {
client: stubInstancesClient{},
clientInstance: newTestInstance(),
clientInstanceMutator: func(i *computepb.Instance) { i.Metadata.Items[0].Key = nil },
wantInstance: core.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
IPs: []string{"192.0.2.0"},
SSHKeys: map[string][]string{},
wantInstance: cloudtypes.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
AliasIPRanges: []string{"192.0.2.0/16"},
PublicIPs: []string{"192.0.2.1"},
PrivateIPs: []string{"192.0.2.0"},
SSHKeys: map[string][]string{},
},
},
"metadata value is null": {
client: stubInstancesClient{},
clientInstance: newTestInstance(),
clientInstanceMutator: func(i *computepb.Instance) { i.Metadata.Items[0].Value = nil },
wantInstance: core.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
IPs: []string{"192.0.2.0"},
SSHKeys: map[string][]string{},
wantInstance: cloudtypes.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
AliasIPRanges: []string{"192.0.2.0/16"},
PublicIPs: []string{"192.0.2.1"},
PrivateIPs: []string{"192.0.2.0"},
SSHKeys: map[string][]string{},
},
},
"instance without network ip": {
client: stubInstancesClient{},
clientInstance: newTestInstance(),
clientInstanceMutator: func(i *computepb.Instance) { i.NetworkInterfaces[0] = nil },
wantInstance: core.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
IPs: []string{},
SSHKeys: map[string][]string{},
wantInstance: cloudtypes.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
AliasIPRanges: []string{},
PublicIPs: []string{},
PrivateIPs: []string{},
SSHKeys: map[string][]string{},
},
},
"network ip is nil": {
client: stubInstancesClient{},
clientInstance: newTestInstance(),
clientInstanceMutator: func(i *computepb.Instance) { i.NetworkInterfaces[0].NetworkIP = nil },
wantInstance: core.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
IPs: []string{},
SSHKeys: map[string][]string{},
wantInstance: cloudtypes.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
AliasIPRanges: []string{"192.0.2.0/16"},
PublicIPs: []string{"192.0.2.1"},
PrivateIPs: []string{},
SSHKeys: map[string][]string{},
},
},
"network alias cidr is nil": {
client: stubInstancesClient{},
clientInstance: newTestInstance(),
clientInstanceMutator: func(i *computepb.Instance) { i.NetworkInterfaces[0].AliasIpRanges[0].IpCidrRange = nil },
wantInstance: cloudtypes.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
AliasIPRanges: []string{},
PublicIPs: []string{"192.0.2.1"},
PrivateIPs: []string{"192.0.2.0"},
SSHKeys: map[string][]string{},
},
},
"network public ip is nil": {
client: stubInstancesClient{},
clientInstance: newTestInstance(),
clientInstanceMutator: func(i *computepb.Instance) { i.NetworkInterfaces[0].AccessConfigs[0].NatIP = nil },
wantInstance: cloudtypes.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
AliasIPRanges: []string{"192.0.2.0/16"},
PublicIPs: []string{},
PrivateIPs: []string{"192.0.2.0"},
SSHKeys: map[string][]string{},
},
},
}
@ -620,15 +679,107 @@ func TestUnsetInstanceMetadata(t *testing.T) {
}
}

func TestRetrieveSubnetworkAliasCIDR(t *testing.T) {
aliasCIDR := "192.0.2.1/24"
someErr := errors.New("some error")
testCases := map[string]struct {
stubInstancesClient stubInstancesClient
stubSubnetworksClient stubSubnetworksClient
wantAliasCIDR string
wantErr bool
}{
"RetrieveSubnetworkAliasCIDR works": {
stubInstancesClient: stubInstancesClient{
GetInstance: &computepb.Instance{
NetworkInterfaces: []*computepb.NetworkInterface{
{
Subnetwork: proto.String("projects/project/regions/region/subnetworks/subnetwork"),
},
},
},
},
stubSubnetworksClient: stubSubnetworksClient{
GetSubnetwork: &computepb.Subnetwork{
IpCidrRange: &aliasCIDR,
},
},
wantAliasCIDR: aliasCIDR,
},
"instance has no network interface": {
stubInstancesClient: stubInstancesClient{
GetInstance: &computepb.Instance{
NetworkInterfaces: []*computepb.NetworkInterface{},
},
},
wantErr: true,
},
"cannot get instance": {
stubInstancesClient: stubInstancesClient{
GetErr: someErr,
},
wantErr: true,
},
"cannot get subnetwork": {
stubInstancesClient: stubInstancesClient{
GetInstance: &computepb.Instance{
NetworkInterfaces: []*computepb.NetworkInterface{
{
Subnetwork: proto.String("projects/project/regions/region/subnetworks/subnetwork"),
},
},
},
},
stubSubnetworksClient: stubSubnetworksClient{
GetErr: someErr,
},
wantErr: true,
},
"subnetwork has no cidr range": {
stubInstancesClient: stubInstancesClient{
GetInstance: &computepb.Instance{
NetworkInterfaces: []*computepb.NetworkInterface{
{
Subnetwork: proto.String("projects/project/regions/region/subnetworks/subnetwork"),
},
},
},
},
stubSubnetworksClient: stubSubnetworksClient{
GetSubnetwork: &computepb.Subnetwork{},
},
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)

client := Client{instanceAPI: tc.stubInstancesClient, subnetworkAPI: tc.stubSubnetworksClient}
aliasCIDR, err := client.RetrieveSubnetworkAliasCIDR(context.Background(), "project", "zone", "subnetwork")

if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
assert.Equal(tc.wantAliasCIDR, aliasCIDR)
})
}
}

func TestClose(t *testing.T) {
someErr := errors.New("failed")

assert := assert.New(t)

client := Client{instanceAPI: stubInstancesClient{}}
client := Client{instanceAPI: stubInstancesClient{}, subnetworkAPI: stubSubnetworksClient{}}
assert.NoError(client.Close())

client = Client{instanceAPI: stubInstancesClient{CloseErr: someErr}}
client = Client{instanceAPI: stubInstancesClient{CloseErr: someErr}, subnetworkAPI: stubSubnetworksClient{}}
assert.Error(client.Close())

client = Client{instanceAPI: stubInstancesClient{}, subnetworkAPI: stubSubnetworksClient{CloseErr: someErr}}
assert.Error(client.Close())
}

@ -725,6 +876,25 @@ func (s stubInstancesClient) Close() error {
return s.CloseErr
}

type stubSubnetworksClient struct {
GetSubnetwork *computepb.Subnetwork
GetErr error
SubnetworkIterator SubnetworkIterator
CloseErr error
}

func (s stubSubnetworksClient) Get(ctx context.Context, req *computepb.GetSubnetworkRequest, opts ...gax.CallOption) (*computepb.Subnetwork, error) {
return s.GetSubnetwork, s.GetErr
}

func (s stubSubnetworksClient) List(ctx context.Context, req *computepb.ListSubnetworksRequest, opts ...gax.CallOption) SubnetworkIterator {
return s.SubnetworkIterator
}

func (s stubSubnetworksClient) Close() error {
return s.CloseErr
}

type stubMetadataClient struct {
InstanceValue string
InstanceErr error

@ -4,6 +4,7 @@ import (
"context"
"fmt"

"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
"github.com/edgelesssys/constellation/coordinator/core"
"github.com/edgelesssys/constellation/coordinator/role"
)
@ -11,9 +12,9 @@ import (
// API handles all GCP API requests.
type API interface {
// RetrieveInstances retrieves a list of all accessible GCP instances with their metadata.
RetrieveInstances(ctx context.Context, project, zone string) ([]core.Instance, error)
RetrieveInstances(ctx context.Context, project, zone string) ([]cloudtypes.Instance, error)
// RetrieveInstances retrieves a single GCP instances with its metadata.
RetrieveInstance(ctx context.Context, project, zone, instanceName string) (core.Instance, error)
RetrieveInstance(ctx context.Context, project, zone, instanceName string) (cloudtypes.Instance, error)
// RetrieveInstanceMetadata retrieves the GCP instance metadata of the current instance.
RetrieveInstanceMetadata(attr string) (string, error)
// RetrieveProjectID retrieves the GCP projectID containing the current instance.
@ -22,6 +23,8 @@ type API interface {
RetrieveZone() (string, error)
// RetrieveInstanceName retrieves the instance name of the current instance.
RetrieveInstanceName() (string, error)
// RetrieveSubnetworkAliasCIDR retrieves the subnetwork CIDR of the current instance.
RetrieveSubnetworkAliasCIDR(ctx context.Context, project, zone, instanceName string) (string, error)
// SetInstanceMetadata sets metadata key: value of the instance specified by project, zone and instanceName.
SetInstanceMetadata(ctx context.Context, project, zone, instanceName, key, value string) error
// UnsetInstanceMetadata removes a metadata key-value pair of the instance specified by project, zone and instanceName.
@ -41,7 +44,7 @@ func New(api API) *Metadata {
}

// List retrieves all instances belonging to the current constellation.
func (m *Metadata) List(ctx context.Context) ([]core.Instance, error) {
func (m *Metadata) List(ctx context.Context) ([]cloudtypes.Instance, error) {
project, err := m.api.RetrieveProjectID()
if err != nil {
return nil, err
@ -58,27 +61,27 @@ func (m *Metadata) List(ctx context.Context) ([]core.Instance, error) {
}

// Self retrieves the current instance.
func (m *Metadata) Self(ctx context.Context) (core.Instance, error) {
func (m *Metadata) Self(ctx context.Context) (cloudtypes.Instance, error) {
project, err := m.api.RetrieveProjectID()
if err != nil {
return core.Instance{}, err
return cloudtypes.Instance{}, err
}
zone, err := m.api.RetrieveZone()
if err != nil {
return core.Instance{}, err
return cloudtypes.Instance{}, err
}
instanceName, err := m.api.RetrieveInstanceName()
if err != nil {
return core.Instance{}, err
return cloudtypes.Instance{}, err
}
return m.api.RetrieveInstance(ctx, project, zone, instanceName)
}

// GetInstance retrieves an instance using its providerID.
func (m *Metadata) GetInstance(ctx context.Context, providerID string) (core.Instance, error) {
func (m *Metadata) GetInstance(ctx context.Context, providerID string) (cloudtypes.Instance, error) {
project, zone, instanceName, err := splitProviderID(providerID)
if err != nil {
return core.Instance{}, fmt.Errorf("invalid providerID: %w", err)
return cloudtypes.Instance{}, fmt.Errorf("invalid providerID: %w", err)
}
return m.api.RetrieveInstance(ctx, project, zone, instanceName)
}
@ -117,6 +120,33 @@ func (m *Metadata) SetVPNIP(ctx context.Context, vpnIP string) error {
return m.api.SetInstanceMetadata(ctx, project, zone, instanceName, core.VPNIPMetadataKey, vpnIP)
}

// GetSubnetworkCIDR returns the subnetwork CIDR of the current instance.
func (m *Metadata) GetSubnetworkCIDR(ctx context.Context) (string, error) {
project, err := m.api.RetrieveProjectID()
if err != nil {
return "", err
}
zone, err := m.api.RetrieveZone()
if err != nil {
return "", err
}
instanceName, err := m.api.RetrieveInstanceName()
if err != nil {
return "", err
}
return m.api.RetrieveSubnetworkAliasCIDR(ctx, project, zone, instanceName)
}

// SupportsLoadBalancer returns true if the cloud provider supports load balancers.
func (m *Metadata) SupportsLoadBalancer() bool {
return false
}

// GetLoadBalancerIP returns the IP of the load balancer.
func (m *Metadata) GetLoadBalancerIP(ctx context.Context) (string, error) {
return "", nil
}

// Supported is used to determine if metadata API is implemented for this cloud provider.
func (m *Metadata) Supported() bool {
return true

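Note: the hunk above adds GetSubnetworkCIDR to the GCP metadata wrapper. A minimal sketch of how a caller might consume it, for example to derive a node's pod CIDR candidate for a CNI with native routing such as Cilium; the subnetCIDRSource interface and the fake client are illustrative assumptions, only the method signature matches the diff:

package main

import (
	"context"
	"fmt"
	"log"
)

// subnetCIDRSource is the minimal surface a consumer needs; the GCP
// Metadata type added in this commit would satisfy it.
type subnetCIDRSource interface {
	GetSubnetworkCIDR(ctx context.Context) (string, error)
}

// nodePodCIDR fetches the subnetwork (alias) CIDR reported by the cloud
// metadata, wrapping any error with context.
func nodePodCIDR(ctx context.Context, meta subnetCIDRSource) (string, error) {
	cidr, err := meta.GetSubnetworkCIDR(ctx)
	if err != nil {
		return "", fmt.Errorf("retrieving subnetwork CIDR: %w", err)
	}
	return cidr, nil
}

// fakeMeta stands in for the real GCP metadata client in this sketch.
type fakeMeta struct{}

func (fakeMeta) GetSubnetworkCIDR(ctx context.Context) (string, error) {
	return "192.0.2.0/24", nil
}

func main() {
	cidr, err := nodePodCIDR(context.Background(), fakeMeta{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pod CIDR candidate:", cidr)
}
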
@ -5,6 +5,7 @@ import (
"errors"
"testing"

"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
"github.com/edgelesssys/constellation/coordinator/core"
"github.com/edgelesssys/constellation/coordinator/role"
"github.com/stretchr/testify/assert"
@ -14,22 +15,22 @@ import (
func TestList(t *testing.T) {
err := errors.New("some err")
uid := "1234"
instancesGenerator := func() *[]core.Instance {
return &[]core.Instance{
instancesGenerator := func() *[]cloudtypes.Instance {
return &[]cloudtypes.Instance{
{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
IPs: []string{"192.0.2.0"},
PrivateIPs: []string{"192.0.2.0"},
},
}
}

testCases := map[string]struct {
client stubGCPClient
instancesGenerator func() *[]core.Instance
instancesMutator func(*[]core.Instance)
instancesGenerator func() *[]cloudtypes.Instance
instancesMutator func(*[]cloudtypes.Instance)
wantErr bool
wantInstances []core.Instance
wantInstances []cloudtypes.Instance
}{
"retrieve works": {
client: stubGCPClient{
@ -40,11 +41,11 @@ func TestList(t *testing.T) {
},
},
instancesGenerator: instancesGenerator,
wantInstances: []core.Instance{
wantInstances: []cloudtypes.Instance{
{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
IPs: []string{"192.0.2.0"},
PrivateIPs: []string{"192.0.2.0"},
},
},
},
@ -105,22 +106,22 @@ func TestSelf(t *testing.T) {
testCases := map[string]struct {
client stubGCPClient
wantErr bool
wantInstance core.Instance
wantInstance cloudtypes.Instance
}{
"retrieve works": {
client: stubGCPClient{
projectID: "someProjectID",
zone: "someZone",
retrieveInstanceValue: core.Instance{
retrieveInstanceValue: cloudtypes.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
IPs: []string{"192.0.2.0"},
PrivateIPs: []string{"192.0.2.0"},
},
},
wantInstance: core.Instance{
wantInstance: cloudtypes.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
IPs: []string{"192.0.2.0"},
PrivateIPs: []string{"192.0.2.0"},
},
},
"retrieve error is detected": {
@ -179,21 +180,21 @@ func TestGetInstance(t *testing.T) {
providerID string
client stubGCPClient
wantErr bool
wantInstance core.Instance
wantInstance cloudtypes.Instance
}{
"retrieve works": {
providerID: "gce://someProject/someZone/someInstance",
client: stubGCPClient{
retrieveInstanceValue: core.Instance{
retrieveInstanceValue: cloudtypes.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
IPs: []string{"192.0.2.0"},
PrivateIPs: []string{"192.0.2.0"},
},
},
wantInstance: core.Instance{
wantInstance: cloudtypes.Instance{
Name: "someInstance",
ProviderID: "gce://someProject/someZone/someInstance",
IPs: []string{"192.0.2.0"},
PrivateIPs: []string{"192.0.2.0"},
},
},
"retrieve error is detected": {
@ -357,12 +358,13 @@ func TestTrivialMetadataFunctions(t *testing.T) {
}

type stubGCPClient struct {
retrieveInstanceValue core.Instance
retrieveInstanceValue cloudtypes.Instance
retrieveInstanceErr error
retrieveInstancesValues []core.Instance
retrieveInstancesValues []cloudtypes.Instance
retrieveInstancesErr error
retrieveInstanceMetadaValues map[string]string
retrieveInstanceMetadataErr error
retrieveSubentworkAliasErr error
projectID string
zone string
instanceName string
@ -384,11 +386,11 @@ type stubGCPClient struct {
unsetMetadataKeys []string
}

func (s *stubGCPClient) RetrieveInstances(ctx context.Context, project, zone string) ([]core.Instance, error) {
func (s *stubGCPClient) RetrieveInstances(ctx context.Context, project, zone string) ([]cloudtypes.Instance, error) {
return s.retrieveInstancesValues, s.retrieveInstancesErr
}

func (s *stubGCPClient) RetrieveInstance(ctx context.Context, project, zone string, instanceName string) (core.Instance, error) {
func (s *stubGCPClient) RetrieveInstance(ctx context.Context, project, zone string, instanceName string) (cloudtypes.Instance, error) {
return s.retrieveInstanceValue, s.retrieveInstanceErr
}

@ -426,3 +428,7 @@ func (s *stubGCPClient) UnsetInstanceMetadata(ctx context.Context, project, zone

return s.unsetInstanceMetadataErr
}

func (s *stubGCPClient) RetrieveSubnetworkAliasCIDR(ctx context.Context, project, zone, instanceName string) (string, error) {
return "", s.retrieveSubentworkAliasErr
}

@ -23,6 +23,26 @@ func (c *instanceClient) List(ctx context.Context, req *computepb.ListInstancesR
return c.InstancesClient.List(ctx, req)
}

type subnetworkClient struct {
*compute.SubnetworksClient
}

func (c *subnetworkClient) Close() error {
return c.SubnetworksClient.Close()
}

func (c *subnetworkClient) List(ctx context.Context, req *computepb.ListSubnetworksRequest,
opts ...gax.CallOption,
) SubnetworkIterator {
return c.SubnetworksClient.List(ctx, req)
}

func (c *subnetworkClient) Get(ctx context.Context, req *computepb.GetSubnetworkRequest,
opts ...gax.CallOption,
) (*computepb.Subnetwork, error) {
return c.SubnetworksClient.Get(ctx, req)
}

type metadataClient struct{}

func (c *metadataClient) InstanceAttributeValue(attr string) (string, error) {

@ -5,9 +5,9 @@ const (
CloudControllerManagerImageAWS = "us.gcr.io/k8s-artifacts-prod/provider-aws/cloud-controller-manager:v1.22.0-alpha.0"
// CloudControllerManagerImageGCP is the CCM image used on GCP.
// TODO: use newer "cloud-provider-gcp" from https://github.com/kubernetes/cloud-provider-gcp when newer releases are available.
CloudControllerManagerImageGCP = "ghcr.io/malt3/cloud-provider-gcp:latest"
CloudControllerManagerImageGCP = "ghcr.io/edgelesssys/cloud-provider-gcp:sha-2f6a5b07fc2d37f24f8ff725132f87584d627d8f"
// CloudControllerManagerImageAzure is the CCM image used on Azure.
CloudControllerManagerImageAzure = "mcr.microsoft.com/oss/kubernetes/azure-cloud-controller-manager:v1.23.5"
CloudControllerManagerImageAzure = "mcr.microsoft.com/oss/kubernetes/azure-cloud-controller-manager:v1.23.11"
// CloudNodeManagerImageAzure is the cloud-node-manager image used on Azure.
CloudNodeManagerImageAzure = "mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.23.5"
CloudNodeManagerImageAzure = "mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.23.11"
)

@ -1,7 +1,7 @@
package qemu

import (
"github.com/edgelesssys/constellation/coordinator/core"
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
k8s "k8s.io/api/core/v1"
)
@ -15,7 +15,7 @@ func (a Autoscaler) Name() string {
}

// Secrets returns a list of secrets to deploy together with the k8s cluster-autoscaler.
func (a Autoscaler) Secrets(instance core.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
func (a Autoscaler) Secrets(instance cloudtypes.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
return resources.Secrets{}, nil
}

@ -1,7 +1,9 @@
package qemu

import (
"github.com/edgelesssys/constellation/coordinator/core"
"context"

"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
k8s "k8s.io/api/core/v1"
)
@ -31,13 +33,13 @@ func (c CloudControllerManager) ExtraArgs() []string {

// ConfigMaps returns a list of ConfigMaps to deploy together with the k8s cloud-controller-manager
// Reference: https://kubernetes.io/docs/concepts/configuration/configmap/ .
func (c CloudControllerManager) ConfigMaps(instance core.Instance) (resources.ConfigMaps, error) {
func (c CloudControllerManager) ConfigMaps(instance cloudtypes.Instance) (resources.ConfigMaps, error) {
return resources.ConfigMaps{}, nil
}

// Secrets returns a list of secrets to deploy together with the k8s cloud-controller-manager.
// Reference: https://kubernetes.io/docs/concepts/configuration/secret/ .
func (c CloudControllerManager) Secrets(instance core.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
func (c CloudControllerManager) Secrets(ctx context.Context, instance cloudtypes.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
return resources.Secrets{}, nil
}

@ -59,7 +61,7 @@ func (c CloudControllerManager) Env() []k8s.EnvVar {

// PrepareInstance is called on every instance before deploying the cloud-controller-manager.
// Allows for cloud-provider specific hooks.
func (c CloudControllerManager) PrepareInstance(instance core.Instance, vpnIP string) error {
func (c CloudControllerManager) PrepareInstance(instance cloudtypes.Instance, vpnIP string) error {
// no specific hook required.
return nil
}

@ -3,7 +3,7 @@ package qemu
import (
"context"

"github.com/edgelesssys/constellation/coordinator/core"
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
"github.com/edgelesssys/constellation/coordinator/role"
)

@ -16,17 +16,17 @@ func (m *Metadata) Supported() bool {
}

// List retrieves all instances belonging to the current constellation.
func (m *Metadata) List(ctx context.Context) ([]core.Instance, error) {
func (m *Metadata) List(ctx context.Context) ([]cloudtypes.Instance, error) {
panic("function *Metadata.List not implemented")
}

// Self retrieves the current instance.
func (m *Metadata) Self(ctx context.Context) (core.Instance, error) {
func (m *Metadata) Self(ctx context.Context) (cloudtypes.Instance, error) {
panic("function *Metdata.Self not implemented")
}

// GetInstance retrieves an instance using its providerID.
func (m Metadata) GetInstance(ctx context.Context, providerID string) (core.Instance, error) {
func (m Metadata) GetInstance(ctx context.Context, providerID string) (cloudtypes.Instance, error) {
panic("function *Metadata.GetInstance not implemented")
}

@ -39,3 +39,18 @@ func (m Metadata) SignalRole(ctx context.Context, role role.Role) error {
func (m Metadata) SetVPNIP(ctx context.Context, vpnIP string) error {
panic("function *Metadata.SetVPNIP not implemented")
}

// SupportsLoadBalancer returns true if the cloud provider supports load balancers.
func (m Metadata) SupportsLoadBalancer() bool {
return false
}

// GetLoadBalancerIP returns the IP of the load balancer.
func (m Metadata) GetLoadBalancerIP(ctx context.Context) (string, error) {
panic("function *Metadata.GetLoadBalancerIP not implemented")
}

// GetSubnetworkCIDR retrieves the subnetwork CIDR from cloud provider metadata.
func (m Metadata) GetSubnetworkCIDR(ctx context.Context) (string, error) {
panic("function *Metadata.GetSubnetworkCIDR not implemented")
}

|
||||
var bindIP, bindPort, etcdEndpoint string
|
||||
var enforceEtcdTls bool
|
||||
var kube core.Cluster
|
||||
var metadata core.ProviderMetadata
|
||||
var cloudControllerManager core.CloudControllerManager
|
||||
var cloudNodeManager core.CloudNodeManager
|
||||
var autoscaler core.ClusterAutoscaler
|
||||
var coreMetadata core.ProviderMetadata
|
||||
var encryptedDisk core.EncryptedDisk
|
||||
cfg := zap.NewDevelopmentConfig()
|
||||
|
||||
@ -87,15 +84,13 @@ func main() {
|
||||
issuer = gcp.NewIssuer()
|
||||
validator = gcp.NewValidator(pcrs)
|
||||
|
||||
kube = kubernetes.New(k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New())
|
||||
gcpClient, err := gcpcloud.NewClient(context.Background())
|
||||
if err != nil {
|
||||
log.Fatalf("creating GCP client failed: %v\n", err)
|
||||
}
|
||||
metadata = gcpcloud.New(gcpClient)
|
||||
cloudControllerManager = &gcpcloud.CloudControllerManager{}
|
||||
cloudNodeManager = &gcpcloud.CloudNodeManager{}
|
||||
autoscaler = &gcpcloud.Autoscaler{}
|
||||
metadata := gcpcloud.New(gcpClient)
|
||||
coreMetadata = metadata
|
||||
kube = kubernetes.New("gcp", k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New(), &gcpcloud.CloudControllerManager{}, &gcpcloud.CloudNodeManager{}, &gcpcloud.Autoscaler{}, metadata)
|
||||
encryptedDisk = diskencryption.New()
|
||||
bindIP = defaultIP
|
||||
bindPort = defaultPort
|
||||
@ -112,14 +107,13 @@ func main() {
|
||||
issuer = azure.NewIssuer()
|
||||
validator = azure.NewValidator(pcrs)
|
||||
|
||||
kube = kubernetes.New(k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New())
|
||||
metadata, err = azurecloud.NewMetadata(context.Background())
|
||||
metadata, err := azurecloud.NewMetadata(context.Background())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
cloudControllerManager = &azurecloud.CloudControllerManager{}
|
||||
cloudNodeManager = &azurecloud.CloudNodeManager{}
|
||||
autoscaler = &azurecloud.Autoscaler{}
|
||||
coreMetadata = metadata
|
||||
kube = kubernetes.New("azure", k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New(), azurecloud.NewCloudControllerManager(metadata), &azurecloud.CloudNodeManager{}, &azurecloud.Autoscaler{}, metadata)
|
||||
|
||||
encryptedDisk = diskencryption.New()
|
||||
bindIP = defaultIP
|
||||
bindPort = defaultPort
|
||||
@ -136,13 +130,10 @@ func main() {
|
||||
issuer = qemu.NewIssuer()
|
||||
validator = qemu.NewValidator(pcrs)
|
||||
|
||||
kube = kubernetes.New(k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New())
|
||||
|
||||
// no support for cloud services in qemu
|
||||
metadata = &qemucloud.Metadata{}
|
||||
cloudControllerManager = &qemucloud.CloudControllerManager{}
|
||||
cloudNodeManager = &qemucloud.CloudNodeManager{}
|
||||
autoscaler = &qemucloud.Autoscaler{}
|
||||
metadata := &qemucloud.Metadata{}
|
||||
kube = kubernetes.New("qemu", k8sapi.NewKubernetesUtil(), &k8sapi.CoreOSConfiguration{}, kubectl.New(), &qemucloud.CloudControllerManager{}, &qemucloud.CloudNodeManager{}, &qemucloud.Autoscaler{}, metadata)
|
||||
coreMetadata = metadata
|
||||
|
||||
encryptedDisk = diskencryption.New()
|
||||
bindIP = defaultIP
|
||||
@ -155,10 +146,7 @@ func main() {
|
||||
issuer = core.NewMockIssuer()
|
||||
validator = core.NewMockValidator()
|
||||
kube = &core.ClusterFake{}
|
||||
metadata = &core.ProviderMetadataFake{}
|
||||
cloudControllerManager = &core.CloudControllerManagerFake{}
|
||||
cloudNodeManager = &core.CloudNodeManagerFake{}
|
||||
autoscaler = &core.ClusterAutoscalerFake{}
|
||||
coreMetadata = &core.ProviderMetadataFake{}
|
||||
encryptedDisk = &core.EncryptedDiskFake{}
|
||||
bindIP = defaultIP
|
||||
bindPort = defaultPort
|
||||
@ -174,5 +162,5 @@ func main() {
|
||||
netDialer := &net.Dialer{}
|
||||
dialer := grpcutil.NewDialer(validator, netDialer)
|
||||
run(issuer, wg, openTPM, util.GetIPAddr, dialer, fileHandler, kube,
|
||||
metadata, cloudControllerManager, cloudNodeManager, autoscaler, encryptedDisk, etcdEndpoint, enforceEtcdTls, bindIP, bindPort, zapLoggerCore, fs)
|
||||
coreMetadata, encryptedDisk, etcdEndpoint, enforceEtcdTls, bindIP, bindPort, zapLoggerCore, fs)
|
||||
}
|
||||
|
@ -30,7 +30,7 @@ import (
var version = "0.0.0"

func run(issuer core.QuoteIssuer, vpn core.VPN, openTPM vtpm.TPMOpenFunc, getPublicIPAddr func() (string, error), dialer *grpcutil.Dialer, fileHandler file.Handler,
kube core.Cluster, metadata core.ProviderMetadata, cloudControllerManager core.CloudControllerManager, cloudNodeManager core.CloudNodeManager, clusterAutoscaler core.ClusterAutoscaler, encryptedDisk core.EncryptedDisk, etcdEndpoint string, etcdTLS bool, bindIP, bindPort string, zapLoggerCore *zap.Logger,
kube core.Cluster, metadata core.ProviderMetadata, encryptedDisk core.EncryptedDisk, etcdEndpoint string, etcdTLS bool, bindIP, bindPort string, zapLoggerCore *zap.Logger,
fs afero.Fs,
) {
defer zapLoggerCore.Sync()
@ -47,7 +47,7 @@ func run(issuer core.QuoteIssuer, vpn core.VPN, openTPM vtpm.TPMOpenFunc, getPub
Logger: zapLoggerCore.WithOptions(zap.IncreaseLevel(zap.WarnLevel)).Named("etcd"),
}
linuxUserManager := user.NewLinuxUserManager(fs)
core, err := core.NewCore(vpn, kube, metadata, cloudControllerManager, cloudNodeManager, clusterAutoscaler, encryptedDisk, zapLoggerCore, openTPM, etcdStoreFactory, fileHandler, linuxUserManager)
core, err := core.NewCore(vpn, kube, metadata, encryptedDisk, zapLoggerCore, openTPM, etcdStoreFactory, fileHandler, linuxUserManager)
if err != nil {
zapLoggerCore.Fatal("failed to create core", zap.Error(err))
}

|
||||
func spawnPeer(require *require.Assertions, logger *zap.Logger, netDialer *testdialer.BufconnDialer, netw *network, endpoint string) (*grpc.Server, *pubapi.API, *fakeVPN) {
|
||||
vpn := newVPN(netw, endpoint)
|
||||
fs := afero.NewMemMapFs()
|
||||
cor, err := core.NewCore(vpn, &core.ClusterFake{}, &core.ProviderMetadataFake{}, &core.CloudControllerManagerFake{}, &core.CloudNodeManagerFake{}, &core.ClusterAutoscalerFake{}, &core.EncryptedDiskFake{}, logger, simulator.OpenSimulatedTPM, fakeStoreFactory{}, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
cor, err := core.NewCore(vpn, &core.ClusterFake{}, &core.ProviderMetadataFake{}, &core.EncryptedDiskFake{}, logger, simulator.OpenSimulatedTPM, fakeStoreFactory{}, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
require.NoError(cor.AdvanceState(state.AcceptingInit, nil, nil))
|
||||
|
||||
|
@ -6,9 +6,8 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
k8s "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
var ErrUnimplemented = errors.New("unimplemented")
|
||||
@ -20,24 +19,12 @@ const (
|
||||
VPNIPMetadataKey = "constellation-vpn-ip"
|
||||
)
|
||||
|
||||
// Instance describes a cloud-provider instance including name, providerID, ip addresses and instance metadata.
|
||||
type Instance struct {
|
||||
Name string
|
||||
ProviderID string
|
||||
Role role.Role
|
||||
IPs []string
|
||||
// SSHKeys maps usernames to ssh public keys.
|
||||
SSHKeys map[string][]string
|
||||
}
|
||||
|
||||
// ProviderMetadata implementers read/write cloud provider metadata.
|
||||
type ProviderMetadata interface {
|
||||
// List retrieves all instances belonging to the current constellation.
|
||||
List(ctx context.Context) ([]Instance, error)
|
||||
List(ctx context.Context) ([]cloudtypes.Instance, error)
|
||||
// Self retrieves the current instance.
|
||||
Self(ctx context.Context) (Instance, error)
|
||||
// GetInstance retrieves an instance using its providerID.
|
||||
GetInstance(ctx context.Context, providerID string) (Instance, error)
|
||||
Self(ctx context.Context) (cloudtypes.Instance, error)
|
||||
// SignalRole signals the constellation role via cloud provider metadata (if supported by the CSP and deployment type, otherwise does nothing).
|
||||
SignalRole(ctx context.Context, role role.Role) error
|
||||
// SetVPNIP stores the internally used VPN IP in cloud provider metadata (if supported and required for autoscaling by the CSP, otherwise does nothing).
|
||||
@ -46,121 +33,19 @@ type ProviderMetadata interface {
|
||||
Supported() bool
|
||||
}
|
||||
|
||||
// CloudControllerManager implementers provide configuration for the k8s cloud-controller-manager.
|
||||
type CloudControllerManager interface {
|
||||
// Image returns the container image used to provide cloud-controller-manager for the cloud-provider.
|
||||
Image() string
|
||||
// Path returns the path used by cloud-controller-manager executable within the container image.
|
||||
Path() string
|
||||
// Name returns the cloud-provider name as used by k8s cloud-controller-manager (k8s.gcr.io/cloud-controller-manager).
|
||||
Name() string
|
||||
// ExtraArgs returns a list of arguments to append to the cloud-controller-manager command.
|
||||
ExtraArgs() []string
|
||||
// ConfigMaps returns a list of ConfigMaps to deploy together with the k8s cloud-controller-manager
|
||||
// Reference: https://kubernetes.io/docs/concepts/configuration/configmap/ .
|
||||
ConfigMaps(instance Instance) (resources.ConfigMaps, error)
|
||||
// Secrets returns a list of secrets to deploy together with the k8s cloud-controller-manager.
|
||||
// Reference: https://kubernetes.io/docs/concepts/configuration/secret/ .
|
||||
Secrets(instance Instance, cloudServiceAccountURI string) (resources.Secrets, error)
|
||||
// Volumes returns a list of volumes to deploy together with the k8s cloud-controller-manager.
|
||||
// Reference: https://kubernetes.io/docs/concepts/storage/volumes/ .
|
||||
Volumes() []k8s.Volume
|
||||
// VolumeMounts a list of of volume mounts to deploy together with the k8s cloud-controller-manager.
|
||||
VolumeMounts() []k8s.VolumeMount
|
||||
// Env returns a list of k8s environment key-value pairs to deploy together with the k8s cloud-controller-manager.
|
||||
Env() []k8s.EnvVar
|
||||
// PrepareInstance is called on every instance before deploying the cloud-controller-manager.
|
||||
// Allows for cloud-provider specific hooks.
|
||||
PrepareInstance(instance Instance, vpnIP string) error
|
||||
// Supported is used to determine if cloud controller manager is implemented for this cloud provider.
|
||||
Supported() bool
|
||||
}
|
||||
|
||||
// CloudNodeManager implementers provide configuration for the k8s cloud-node-manager.
|
||||
type CloudNodeManager interface {
|
||||
// Image returns the container image used to provide cloud-node-manager for the cloud-provider.
|
||||
Image() string
|
||||
// Path returns the path used by cloud-node-manager executable within the container image.
|
||||
Path() string
|
||||
// ExtraArgs returns a list of arguments to append to the cloud-node-manager command.
|
||||
ExtraArgs() []string
|
||||
// Supported is used to determine if cloud node manager is implemented for this cloud provider.
|
||||
Supported() bool
|
||||
}
|
||||
|
||||
// ClusterAutoscaler implementers provide configuration for the k8s cluster-autoscaler.
|
||||
type ClusterAutoscaler interface {
|
||||
// Name returns the cloud-provider name as used by k8s cluster-autoscaler.
|
||||
Name() string
|
||||
// Secrets returns a list of secrets to deploy together with the k8s cluster-autoscaler.
|
||||
Secrets(instance Instance, cloudServiceAccountURI string) (resources.Secrets, error)
|
||||
// Volumes returns a list of volumes to deploy together with the k8s cluster-autoscaler.
|
||||
Volumes() []k8s.Volume
|
||||
// VolumeMounts returns a list of volume mounts to deploy together with the k8s cluster-autoscaler.
|
||||
VolumeMounts() []k8s.VolumeMount
|
||||
// Env returns a list of k8s environment key-value pairs to deploy together with the k8s cluster-autoscaler.
|
||||
Env() []k8s.EnvVar
|
||||
// Supported is used to determine if cluster autoscaler is implemented for this cloud provider.
|
||||
Supported() bool
|
||||
}
|
||||
|
||||
// CoordinatorEndpoints retrieves a list of constellation coordinator endpoint candidates from the cloud provider API.
|
||||
func CoordinatorEndpoints(ctx context.Context, metadata ProviderMetadata) ([]string, error) {
|
||||
if !metadata.Supported() {
|
||||
return nil, errors.New("retrieving instances list from cloud provider is not yet supported")
|
||||
}
|
||||
instances, err := metadata.List(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("retrieving instances list from cloud provider failed: %w", err)
|
||||
}
|
||||
coordinatorEndpoints := []string{}
|
||||
for _, instance := range instances {
|
||||
// check if role of instance is "Coordinator"
|
||||
if instance.Role == role.Coordinator {
|
||||
for _, ip := range instance.IPs {
|
||||
coordinatorEndpoints = append(coordinatorEndpoints, net.JoinHostPort(ip, coordinatorPort))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return coordinatorEndpoints, nil
|
||||
}
|
||||
|
||||
// PrepareInstanceForCCM sets the vpn IP in cloud provider metadata.
|
||||
func PrepareInstanceForCCM(ctx context.Context, metadata ProviderMetadata, cloudControllerManager CloudControllerManager, vpnIP string) error {
|
||||
if err := metadata.SetVPNIP(ctx, vpnIP); err != nil {
|
||||
return fmt.Errorf("setting VPN IP for cloud-controller-manager failed: %w", err)
|
||||
}
|
||||
instance, err := metadata.Self(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("retrieving instance metadata for cloud-controller-manager failed: %w", err)
|
||||
}
|
||||
|
||||
return cloudControllerManager.PrepareInstance(instance, vpnIP)
|
||||
}
|
||||
|
||||
type ProviderMetadataFake struct{}
|
||||
|
||||
func (f *ProviderMetadataFake) List(ctx context.Context) ([]Instance, error) {
|
||||
func (f *ProviderMetadataFake) List(ctx context.Context) ([]cloudtypes.Instance, error) {
|
||||
self, err := f.Self(ctx)
|
||||
return []Instance{self}, err
|
||||
return []cloudtypes.Instance{self}, err
|
||||
}
|
||||
|
||||
func (f *ProviderMetadataFake) Self(ctx context.Context) (Instance, error) {
|
||||
return Instance{
|
||||
func (f *ProviderMetadataFake) Self(ctx context.Context) (cloudtypes.Instance, error) {
|
||||
return cloudtypes.Instance{
|
||||
Name: "instanceName",
|
||||
ProviderID: "fake://instance-id",
|
||||
Role: role.Unknown,
|
||||
IPs: []string{"192.0.2.1"},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *ProviderMetadataFake) GetInstance(ctx context.Context, providerID string) (Instance, error) {
|
||||
return Instance{
|
||||
Name: "instanceName",
|
||||
ProviderID: providerID,
|
||||
Role: role.Unknown,
|
||||
IPs: []string{"192.0.2.1"},
|
||||
PrivateIPs: []string{"192.0.2.1"},
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -176,96 +61,24 @@ func (f *ProviderMetadataFake) Supported() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type CloudControllerManagerFake struct{}
|
||||
// CoordinatorEndpoints retrieves a list of constellation coordinator endpoint candidates from the cloud provider API.
|
||||
func CoordinatorEndpoints(ctx context.Context, metadata ProviderMetadata) ([]string, error) {
|
||||
if !metadata.Supported() {
|
||||
return nil, errors.New("retrieving instances list from cloud provider is not yet supported")
|
||||
}
|
||||
instances, err := metadata.List(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("retrieving instances list from cloud provider failed: %w", err)
|
||||
}
|
||||
coordinatorEndpoints := []string{}
|
||||
for _, instance := range instances {
|
||||
// check if role of instance is "Coordinator"
|
||||
if instance.Role == role.Coordinator {
|
||||
for _, ip := range instance.PrivateIPs {
|
||||
coordinatorEndpoints = append(coordinatorEndpoints, net.JoinHostPort(ip, coordinatorPort))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *CloudControllerManagerFake) Image() string {
|
||||
return "fake-image:latest"
|
||||
}
|
||||
|
||||
func (f *CloudControllerManagerFake) Path() string {
|
||||
return "/fake-controller-manager"
|
||||
}
|
||||
|
||||
func (f *CloudControllerManagerFake) Name() string {
|
||||
return "fake"
|
||||
}
|
||||
|
||||
func (f *CloudControllerManagerFake) ExtraArgs() []string {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func (f *CloudControllerManagerFake) ConfigMaps(instance Instance) (resources.ConfigMaps, error) {
|
||||
return []*k8s.ConfigMap{}, nil
|
||||
}
|
||||
|
||||
func (f *CloudControllerManagerFake) Secrets(instance Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
|
||||
return []*k8s.Secret{}, nil
|
||||
}
|
||||
|
||||
func (f *CloudControllerManagerFake) Volumes() []k8s.Volume {
|
||||
return []k8s.Volume{}
|
||||
}
|
||||
|
||||
func (f *CloudControllerManagerFake) VolumeMounts() []k8s.VolumeMount {
|
||||
return []k8s.VolumeMount{}
|
||||
}
|
||||
|
||||
func (f *CloudControllerManagerFake) Env() []k8s.EnvVar {
|
||||
return []k8s.EnvVar{}
|
||||
}
|
||||
|
||||
func (f *CloudControllerManagerFake) PrepareInstance(instance Instance, vpnIP string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *CloudControllerManagerFake) Supported() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type CloudNodeManagerFake struct{}
|
||||
|
||||
func (f *CloudNodeManagerFake) Image() string {
|
||||
return "fake-image:latest"
|
||||
}
|
||||
|
||||
func (f *CloudNodeManagerFake) Path() string {
|
||||
return "/fake-cloud-node-manager"
|
||||
}
|
||||
|
||||
func (f *CloudNodeManagerFake) ExtraArgs() []string {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func (f *CloudNodeManagerFake) Supported() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type ClusterAutoscalerFake struct{}
|
||||
|
||||
func (f *ClusterAutoscalerFake) Name() string {
|
||||
return "fake"
|
||||
}
|
||||
|
||||
// Secrets returns a list of secrets to deploy together with the k8s cluster-autoscaler.
|
||||
func (f *ClusterAutoscalerFake) Secrets(instance Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
|
||||
return resources.Secrets{}, nil
|
||||
}
|
||||
|
||||
// Volumes returns a list of volumes to deploy together with the k8s cluster-autoscaler.
|
||||
func (f *ClusterAutoscalerFake) Volumes() []k8s.Volume {
|
||||
return []k8s.Volume{}
|
||||
}
|
||||
|
||||
// VolumeMounts returns a list of volume mounts to deploy together with the k8s cluster-autoscaler.
|
||||
func (f *ClusterAutoscalerFake) VolumeMounts() []k8s.VolumeMount {
|
||||
return []k8s.VolumeMount{}
|
||||
}
|
||||
|
||||
// Env returns a list of k8s environment key-value pairs to deploy together with the k8s cluster-autoscaler.
|
||||
func (f *ClusterAutoscalerFake) Env() []k8s.EnvVar {
|
||||
return []k8s.EnvVar{}
|
||||
}
|
||||
|
||||
func (f *ClusterAutoscalerFake) Supported() bool {
|
||||
return false
|
||||
return coordinatorEndpoints, nil
|
||||
}
|
||||
|
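Note: with the Instance type moved to cloudtypes, CoordinatorEndpoints now joins each coordinator's PrivateIPs with the coordinator port. A minimal, self-contained sketch of that selection logic follows; the local instance struct and the "9000" port are illustrative stand-ins, not values taken from this commit:

package main

import (
	"fmt"
	"net"
)

type nodeRole int

const (
	roleUnknown nodeRole = iota
	roleCoordinator
	roleNode
)

// instance is a simplified stand-in for cloudtypes.Instance; only the
// fields used by the endpoint selection are included.
type instance struct {
	Role       nodeRole
	PrivateIPs []string
}

// coordinatorEndpoints mirrors the behavior shown in the diff: collect
// host:port pairs for every private IP of every coordinator instance.
func coordinatorEndpoints(instances []instance, port string) []string {
	endpoints := []string{}
	for _, inst := range instances {
		if inst.Role != roleCoordinator {
			continue
		}
		for _, ip := range inst.PrivateIPs {
			endpoints = append(endpoints, net.JoinHostPort(ip, port))
		}
	}
	return endpoints
}

func main() {
	instances := []instance{
		{Role: roleCoordinator, PrivateIPs: []string{"192.0.2.1"}},
		{Role: roleNode, PrivateIPs: []string{"192.0.2.2"}},
	}
	// Prints [192.0.2.1:9000]; the node instance is skipped.
	fmt.Println(coordinatorEndpoints(instances, "9000"))
}
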
@ -5,6 +5,7 @@ import (
"errors"
"testing"

"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
"github.com/edgelesssys/constellation/coordinator/role"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -20,18 +21,18 @@ func TestCoordinatorEndpoints(t *testing.T) {
}{
"getting coordinator endpoints works and role is checked": {
metadata: stubMetadata{
listRes: []Instance{
listRes: []cloudtypes.Instance{
{
Name: "someInstanceA",
Role: role.Coordinator,
ProviderID: "provider://somePath/someInstanceA",
IPs: []string{"192.0.2.1"},
PrivateIPs: []string{"192.0.2.1"},
},
{
Name: "someInstanceB",
Role: role.Node,
ProviderID: "provider://somePath/someInstanceB",
IPs: []string{"192.0.2.2"},
PrivateIPs: []string{"192.0.2.2"},
},
},
supportedRes: true,
@ -70,64 +71,27 @@ func TestCoordinatorEndpoints(t *testing.T) {
}
}

func TestPrepareInstanceForCCM(t *testing.T) {
err := errors.New("some err")

testCases := map[string]struct {
metadata stubMetadata
vpnIP string
wantErr bool
}{
"updating role works": {
metadata: stubMetadata{},
vpnIP: "192.0.2.1",
wantErr: false,
},
"setting VPN IP fails": {
metadata: stubMetadata{
setVPNIPErr: err,
},
wantErr: true,
},
}

for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)

err := PrepareInstanceForCCM(context.Background(), &tc.metadata, &CloudControllerManagerFake{}, tc.vpnIP)

if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
})
}
}

type stubMetadata struct {
listRes []Instance
listRes []cloudtypes.Instance
listErr error
selfRes Instance
selfRes cloudtypes.Instance
selfErr error
getInstanceRes Instance
getInstanceRes cloudtypes.Instance
getInstanceErr error
signalRoleErr error
setVPNIPErr error
supportedRes bool
}

func (m *stubMetadata) List(ctx context.Context) ([]Instance, error) {
func (m *stubMetadata) List(ctx context.Context) ([]cloudtypes.Instance, error) {
return m.listRes, m.listErr
}

func (m *stubMetadata) Self(ctx context.Context) (Instance, error) {
func (m *stubMetadata) Self(ctx context.Context) (cloudtypes.Instance, error) {
return m.selfRes, m.selfErr
}

func (m *stubMetadata) GetInstance(ctx context.Context, providerID string) (Instance, error) {
func (m *stubMetadata) GetInstance(ctx context.Context, providerID string) (cloudtypes.Instance, error) {
return m.getInstanceRes, m.getInstanceErr
}

@ -142,3 +106,11 @@ func (m *stubMetadata) SetVPNIP(ctx context.Context, vpnIP string) error {
func (m *stubMetadata) Supported() bool {
return m.supportedRes
}

func (m *stubMetadata) GetSubnetworkCIDR(ctx context.Context) (string, error) {
return "", nil
}

func (m *stubMetadata) GetLoadBalancerIP(ctx context.Context) (string, error) {
return "", nil
}

@ -2,11 +2,8 @@ package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
"github.com/edgelesssys/constellation/internal/constants"
|
||||
"go.uber.org/zap"
|
||||
@ -14,93 +11,24 @@ import (
|
||||
)
|
||||
|
||||
// GetK8sJoinArgs returns the args needed by a Node to join the cluster.
|
||||
func (c *Core) GetK8sJoinArgs() (*kubeadm.BootstrapTokenDiscovery, error) {
|
||||
return c.kube.GetJoinToken(constants.KubernetesJoinTokenTTL)
|
||||
func (c *Core) GetK8sJoinArgs(ctx context.Context) (*kubeadm.BootstrapTokenDiscovery, error) {
|
||||
return c.kube.GetJoinToken(ctx, constants.KubernetesJoinTokenTTL)
|
||||
}
|
||||
|
||||
// GetK8SCertificateKey returns the key needed by a Coordinator to join the cluster.
|
||||
func (c *Core) GetK8SCertificateKey() (string, error) {
|
||||
return c.kube.GetKubeadmCertificateKey()
|
||||
func (c *Core) GetK8SCertificateKey(ctx context.Context) (string, error) {
|
||||
return c.kube.GetKubeadmCertificateKey(ctx)
|
||||
}
|
||||
|
||||
// InitCluster initializes the cluster, stores the join args, and returns the kubeconfig.
|
||||
func (c *Core) InitCluster(autoscalingNodeGroups []string, cloudServiceAccountURI string, masterSecret []byte) ([]byte, error) {
|
||||
var nodeName string
|
||||
var providerID string
|
||||
var instance Instance
|
||||
var ccmConfigMaps resources.ConfigMaps
|
||||
var ccmSecrets resources.Secrets
|
||||
var caSecrets resources.Secrets
|
||||
var err error
|
||||
nodeIP := coordinatorVPNIP.String()
|
||||
if c.metadata.Supported() {
|
||||
instance, err = c.metadata.Self(context.TODO())
|
||||
if err != nil {
|
||||
c.zaplogger.Error("Retrieving own instance metadata failed", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
nodeName = instance.Name
|
||||
providerID = instance.ProviderID
|
||||
if len(instance.IPs) > 0 {
|
||||
nodeIP = instance.IPs[0]
|
||||
}
|
||||
} else {
|
||||
nodeName = coordinatorVPNIP.String()
|
||||
}
|
||||
if c.cloudControllerManager.Supported() && c.metadata.Supported() {
|
||||
c.zaplogger.Info("Preparing node for cloud-controller-manager")
|
||||
if err := PrepareInstanceForCCM(context.TODO(), c.metadata, c.cloudControllerManager, coordinatorVPNIP.String()); err != nil {
|
||||
c.zaplogger.Error("Preparing node for CCM failed", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
ccmConfigMaps, err = c.cloudControllerManager.ConfigMaps(instance)
|
||||
if err != nil {
|
||||
c.zaplogger.Error("Defining ConfigMaps for CCM failed", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
ccmSecrets, err = c.cloudControllerManager.Secrets(instance, cloudServiceAccountURI)
|
||||
if err != nil {
|
||||
c.zaplogger.Error("Defining Secrets for CCM failed", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if c.clusterAutoscaler.Supported() {
|
||||
caSecrets, err = c.clusterAutoscaler.Secrets(instance, cloudServiceAccountURI)
|
||||
if err != nil {
|
||||
c.zaplogger.Error("Defining Secrets for cluster-autoscaler failed", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Core) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, masterSecret []byte) ([]byte, error) {
|
||||
c.zaplogger.Info("Initializing cluster")
|
||||
if err := c.kube.InitCluster(kubernetes.InitClusterInput{
|
||||
APIServerAdvertiseIP: coordinatorVPNIP.String(),
|
||||
NodeIP: nodeIP,
|
||||
NodeName: k8sCompliantHostname(nodeName),
|
||||
ProviderID: providerID,
|
||||
SupportClusterAutoscaler: c.clusterAutoscaler.Supported(),
|
||||
AutoscalingCloudprovider: c.clusterAutoscaler.Name(),
|
||||
AutoscalingSecrets: caSecrets,
|
||||
AutoscalingVolumes: c.clusterAutoscaler.Volumes(),
|
||||
AutoscalingVolumeMounts: c.clusterAutoscaler.VolumeMounts(),
|
||||
AutoscalingEnv: c.clusterAutoscaler.Env(),
|
||||
AutoscalingNodeGroups: autoscalingNodeGroups,
|
||||
SupportsCloudControllerManager: c.cloudControllerManager.Supported(),
|
||||
CloudControllerManagerName: c.cloudControllerManager.Name(),
|
||||
CloudControllerManagerImage: c.cloudControllerManager.Image(),
|
||||
CloudControllerManagerPath: c.cloudControllerManager.Path(),
|
||||
CloudControllerManagerExtraArgs: c.cloudControllerManager.ExtraArgs(),
|
||||
CloudControllerManagerConfigMaps: ccmConfigMaps,
|
||||
CloudControllerManagerSecrets: ccmSecrets,
|
||||
CloudControllerManagerVolumes: c.cloudControllerManager.Volumes(),
|
||||
CloudControllerManagerVolumeMounts: c.cloudControllerManager.VolumeMounts(),
|
||||
CloudControllerManagerEnv: c.cloudControllerManager.Env(),
|
||||
SupportsCloudNodeManager: c.cloudNodeManager.Supported(),
|
||||
CloudNodeManagerImage: c.cloudNodeManager.Image(),
|
||||
CloudNodeManagerPath: c.cloudNodeManager.Path(),
|
||||
CloudNodeManagerExtraArgs: c.cloudNodeManager.ExtraArgs(),
|
||||
MasterSecret: masterSecret,
|
||||
}); err != nil {
|
||||
vpnIP, err := c.GetVPNIP()
|
||||
if err != nil {
|
||||
c.zaplogger.Error("Retrieving vpn ip failed", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
if err := c.kube.InitCluster(ctx, autoscalingNodeGroups, cloudServiceAccountURI, vpnIP, masterSecret); err != nil {
|
||||
c.zaplogger.Error("Initializing cluster failed", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
@ -125,41 +53,16 @@ func (c *Core) InitCluster(autoscalingNodeGroups []string, cloudServiceAccountUR
|
||||
}
|
||||
|
||||
// JoinCluster lets a Node join the cluster.
|
||||
func (c *Core) JoinCluster(args *kubeadm.BootstrapTokenDiscovery, certKey string, peerRole role.Role) error {
|
||||
func (c *Core) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, certKey string, peerRole role.Role) error {
|
||||
c.zaplogger.Info("Joining Kubernetes cluster")
|
||||
nodeVPNIP, err := c.vpn.GetInterfaceIP()
|
||||
if err != nil {
|
||||
c.zaplogger.Error("Retrieving vpn ip failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
var nodeName string
|
||||
var providerID string
|
||||
nodeIP := nodeVPNIP
|
||||
if c.metadata.Supported() {
|
||||
instance, err := c.metadata.Self(context.TODO())
|
||||
if err != nil {
|
||||
c.zaplogger.Error("Retrieving own instance metadata failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
providerID = instance.ProviderID
|
||||
nodeName = instance.Name
|
||||
if len(instance.IPs) > 0 {
|
||||
nodeIP = instance.IPs[0]
|
||||
}
|
||||
} else {
|
||||
nodeName = nodeVPNIP
|
||||
}
|
||||
if c.cloudControllerManager.Supported() && c.metadata.Supported() {
|
||||
c.zaplogger.Info("Preparing node for cloud-controller-manager")
|
||||
if err := PrepareInstanceForCCM(context.TODO(), c.metadata, c.cloudControllerManager, nodeVPNIP); err != nil {
|
||||
c.zaplogger.Error("Preparing node for CCM failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
c.zaplogger.Info("k8s Join data", zap.String("nodename", nodeName), zap.String("nodeIP", nodeIP), zap.String("nodeVPNIP", nodeVPNIP), zap.String("provid", providerID))
|
||||
// we need to pass the VPNIP for another control-plane, otherwise etcd will bind itself to the wrong IP address and fails
|
||||
if err := c.kube.JoinCluster(args, k8sCompliantHostname(nodeName), nodeIP, nodeVPNIP, providerID, certKey, c.cloudControllerManager.Supported(), peerRole); err != nil {
|
||||
if err := c.kube.JoinCluster(ctx, args, nodeVPNIP, certKey, peerRole); err != nil {
|
||||
c.zaplogger.Error("Joining Kubernetes cluster failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
@ -177,15 +80,15 @@ func (c *Core) JoinCluster(args *kubeadm.BootstrapTokenDiscovery, certKey string
|
||||
// Cluster manages the overall cluster lifecycle (init, join).
|
||||
type Cluster interface {
|
||||
// InitCluster bootstraps a new cluster with the current node being the master, returning the arguments required to join the cluster.
|
||||
InitCluster(kubernetes.InitClusterInput) error
|
||||
InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, masterSecret []byte) error
|
||||
// JoinCluster will join the current node to an existing cluster.
|
||||
JoinCluster(args *kubeadm.BootstrapTokenDiscovery, nodeName, nodeIP, nodeVPNIP, providerID, certKey string, ccmSupported bool, peerRole role.Role) error
|
||||
JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, nodeVPNIP, certKey string, peerRole role.Role) error
|
||||
// GetKubeconfig reads the kubeconfig from the filesystem. Only succeeds after cluster is initialized.
|
||||
GetKubeconfig() ([]byte, error)
|
||||
// GetKubeadmCertificateKey returns the 64-byte hex string key needed to join the cluster as control-plane. This function must be executed on a control-plane.
|
||||
GetKubeadmCertificateKey() (string, error)
|
||||
GetKubeadmCertificateKey(ctx context.Context) (string, error)
|
||||
// GetJoinToken returns a bootstrap (join) token.
|
||||
GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error)
|
||||
GetJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error)
|
||||
// StartKubelet starts the kubelet service.
|
||||
StartKubelet() error
|
||||
}
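For orientation: with these signature changes, a caller threads a single context through both lifecycle calls. A minimal sketch of driving the interface with the new signatures (the helper name and its inputs are illustrative assumptions, not part of this change):

// initAndFetchKubeconfig is a hypothetical helper demonstrating the context-aware Cluster API.
func initAndFetchKubeconfig(ctx context.Context, cluster Cluster, nodeGroups []string, serviceAccountURI, vpnIP string, masterSecret []byte) ([]byte, error) {
	if err := cluster.InitCluster(ctx, nodeGroups, serviceAccountURI, vpnIP, masterSecret); err != nil {
		return nil, err
	}
	// GetKubeconfig only succeeds after the cluster has been initialized.
	return cluster.GetKubeconfig()
}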
|
||||
@ -194,12 +97,12 @@ type Cluster interface {
|
||||
type ClusterFake struct{}
|
||||
|
||||
// InitCluster fakes bootstrapping a new cluster with the current node being the master, returning the arguments required to join the cluster.
|
||||
func (c *ClusterFake) InitCluster(kubernetes.InitClusterInput) error {
|
||||
func (c *ClusterFake) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, masterSecret []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// JoinCluster will fake joining the current node to an existing cluster.
|
||||
func (c *ClusterFake) JoinCluster(args *kubeadm.BootstrapTokenDiscovery, nodeName, nodeIP, nodeVPNIP, providerID, certKey string, _ bool, _ role.Role) error {
|
||||
func (c *ClusterFake) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, nodeVPNIP, certKey string, peerRole role.Role) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -209,12 +112,12 @@ func (c *ClusterFake) GetKubeconfig() ([]byte, error) {
|
||||
}
|
||||
|
||||
// GetKubeadmCertificateKey fakes generating a certificateKey.
|
||||
func (c *ClusterFake) GetKubeadmCertificateKey() (string, error) {
|
||||
func (c *ClusterFake) GetKubeadmCertificateKey(context.Context) (string, error) {
|
||||
return "controlPlaneCertficateKey", nil
|
||||
}
|
||||
|
||||
// GetJoinToken returns a bootstrap (join) token.
|
||||
func (c *ClusterFake) GetJoinToken(_ time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
|
||||
func (c *ClusterFake) GetJoinToken(ctx context.Context, _ time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
|
||||
return &kubeadm.BootstrapTokenDiscovery{
|
||||
APIServerEndpoint: "0.0.0.0",
|
||||
Token: "kube-fake-token",
|
||||
@ -226,12 +129,3 @@ func (c *ClusterFake) GetJoinToken(_ time.Duration) (*kubeadm.BootstrapTokenDisc
|
||||
func (c *ClusterFake) StartKubelet() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// k8sCompliantHostname transforms a hostname to an RFC 1123 compliant, lowercase subdomain as required by Kubernetes node names.
|
||||
// The following regex is used by k8s for validation: /^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$/ .
|
||||
// Only a simple heuristic is used for now (to lowercase, replace underscores).
|
||||
func k8sCompliantHostname(in string) string {
|
||||
hostname := strings.ToLower(in)
|
||||
hostname = strings.ReplaceAll(hostname, "_", "-")
|
||||
return hostname
|
||||
}
|
||||
|
@ -1,14 +1,12 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"regexp"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/attestation/simulator"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
"github.com/edgelesssys/constellation/internal/deploy/user"
|
||||
"github.com/edgelesssys/constellation/internal/file"
|
||||
@ -16,165 +14,65 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap"
|
||||
k8s "k8s.io/api/core/v1"
|
||||
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
|
||||
)
|
||||
|
||||
func TestInitCluster(t *testing.T) {
|
||||
someErr := errors.New("someErr")
|
||||
kubeconfigContent := []byte("kubeconfig")
|
||||
|
||||
testMS := []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}
|
||||
|
||||
testCases := map[string]struct {
|
||||
cluster clusterStub
|
||||
metadata stubMetadata
|
||||
cloudControllerManager stubCloudControllerManager
|
||||
cloudNodeManager stubCloudNodeManager
|
||||
clusterAutoscaler stubClusterAutoscaler
|
||||
masterSecret []byte
|
||||
autoscalingNodeGroups []string
|
||||
wantErr bool
|
||||
wantInitClusterInput kubernetes.InitClusterInput
|
||||
cluster Cluster
|
||||
vpn VPN
|
||||
metadata ProviderMetadata
|
||||
masterSecret []byte
|
||||
autoscalingNodeGroups []string
|
||||
wantErr bool
|
||||
}{
|
||||
"InitCluster works": {
|
||||
cluster: clusterStub{
|
||||
kubeconfig: []byte("kubeconfig"),
|
||||
cluster: &clusterStub{
|
||||
kubeconfig: kubeconfigContent,
|
||||
},
|
||||
vpn: &stubVPN{interfaceIP: "192.0.2.1"},
|
||||
metadata: &stubMetadata{supportedRes: true},
|
||||
autoscalingNodeGroups: []string{"someNodeGroup"},
|
||||
},
|
||||
"InitCluster works even if signal role fails": {
|
||||
cluster: &clusterStub{
|
||||
kubeconfig: kubeconfigContent,
|
||||
},
|
||||
vpn: &stubVPN{interfaceIP: "192.0.2.1"},
|
||||
metadata: &stubMetadata{supportedRes: true, signalRoleErr: someErr},
|
||||
autoscalingNodeGroups: []string{"someNodeGroup"},
|
||||
masterSecret: testMS,
|
||||
wantInitClusterInput: kubernetes.InitClusterInput{
|
||||
APIServerAdvertiseIP: "10.118.0.1",
|
||||
NodeIP: "10.118.0.1",
|
||||
NodeName: "10.118.0.1",
|
||||
SupportsCloudControllerManager: false,
|
||||
SupportClusterAutoscaler: false,
|
||||
AutoscalingNodeGroups: []string{"someNodeGroup"},
|
||||
MasterSecret: testMS,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
"Instance metadata is retrieved": {
|
||||
cluster: clusterStub{
|
||||
kubeconfig: []byte("kubeconfig"),
|
||||
"cannot get VPN IP": {
|
||||
cluster: &clusterStub{
|
||||
kubeconfig: kubeconfigContent,
|
||||
},
|
||||
masterSecret: testMS,
|
||||
metadata: stubMetadata{
|
||||
selfRes: Instance{
|
||||
Name: "some-name",
|
||||
ProviderID: "fake://providerid",
|
||||
},
|
||||
supportedRes: true,
|
||||
},
|
||||
wantInitClusterInput: kubernetes.InitClusterInput{
|
||||
APIServerAdvertiseIP: "10.118.0.1",
|
||||
NodeIP: "10.118.0.1",
|
||||
NodeName: "some-name",
|
||||
ProviderID: "fake://providerid",
|
||||
SupportsCloudControllerManager: false,
|
||||
SupportClusterAutoscaler: false,
|
||||
MasterSecret: testMS,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
"metadata of self retrieval error is checked": {
|
||||
cluster: clusterStub{
|
||||
kubeconfig: []byte("kubeconfig"),
|
||||
},
|
||||
metadata: stubMetadata{
|
||||
supportedRes: true,
|
||||
selfErr: errors.New("metadata retrieval error"),
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
"Autoscaler is prepared when supported": {
|
||||
cluster: clusterStub{
|
||||
kubeconfig: []byte("kubeconfig"),
|
||||
},
|
||||
clusterAutoscaler: stubClusterAutoscaler{
|
||||
nameRes: "some-name",
|
||||
supportedRes: true,
|
||||
},
|
||||
masterSecret: testMS,
|
||||
vpn: &stubVPN{getInterfaceIPErr: someErr},
|
||||
autoscalingNodeGroups: []string{"someNodeGroup"},
|
||||
wantInitClusterInput: kubernetes.InitClusterInput{
|
||||
APIServerAdvertiseIP: "10.118.0.1",
|
||||
NodeIP: "10.118.0.1",
|
||||
NodeName: "10.118.0.1",
|
||||
SupportsCloudControllerManager: false,
|
||||
SupportClusterAutoscaler: true,
|
||||
AutoscalingCloudprovider: "some-name",
|
||||
AutoscalingNodeGroups: []string{"someNodeGroup"},
|
||||
MasterSecret: testMS,
|
||||
},
|
||||
wantErr: false,
|
||||
wantErr: true,
|
||||
},
|
||||
"Node is prepared for CCM if supported": {
|
||||
cluster: clusterStub{
|
||||
kubeconfig: []byte("kubeconfig"),
|
||||
},
|
||||
masterSecret: testMS,
|
||||
cloudControllerManager: stubCloudControllerManager{
|
||||
supportedRes: true,
|
||||
nameRes: "some-name",
|
||||
imageRes: "someImage",
|
||||
pathRes: "/some/path",
|
||||
},
|
||||
wantInitClusterInput: kubernetes.InitClusterInput{
|
||||
APIServerAdvertiseIP: "10.118.0.1",
|
||||
NodeIP: "10.118.0.1",
|
||||
NodeName: "10.118.0.1",
|
||||
SupportsCloudControllerManager: true,
|
||||
SupportClusterAutoscaler: false,
|
||||
CloudControllerManagerName: "some-name",
|
||||
CloudControllerManagerImage: "someImage",
|
||||
CloudControllerManagerPath: "/some/path",
|
||||
MasterSecret: testMS,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
"Node preparation for CCM can fail": {
|
||||
cluster: clusterStub{
|
||||
kubeconfig: []byte("kubeconfig"),
|
||||
},
|
||||
metadata: stubMetadata{
|
||||
supportedRes: true,
|
||||
},
|
||||
cloudControllerManager: stubCloudControllerManager{
|
||||
supportedRes: true,
|
||||
nameRes: "some-name",
|
||||
imageRes: "someImage",
|
||||
pathRes: "/some/path",
|
||||
prepareInstanceRes: errors.New("preparing node for CCM failed"),
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
"updating role fails without error": {
|
||||
cluster: clusterStub{
|
||||
kubeconfig: []byte("kubeconfig"),
|
||||
},
|
||||
masterSecret: testMS,
|
||||
metadata: stubMetadata{
|
||||
signalRoleErr: errors.New("updating role fails"),
|
||||
supportedRes: true,
|
||||
},
|
||||
wantErr: false,
|
||||
wantInitClusterInput: kubernetes.InitClusterInput{
|
||||
APIServerAdvertiseIP: "10.118.0.1",
|
||||
NodeIP: "10.118.0.1",
|
||||
MasterSecret: testMS,
|
||||
},
|
||||
},
|
||||
"getting kubeconfig fail detected": {
|
||||
cluster: clusterStub{
|
||||
getKubeconfigErr: errors.New("getting kubeconfig fails"),
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
"InitCluster fail detected": {
|
||||
cluster: clusterStub{
|
||||
"cannot init kubernetes": {
|
||||
cluster: &clusterStub{
|
||||
initErr: someErr,
|
||||
},
|
||||
wantErr: true,
|
||||
vpn: &stubVPN{interfaceIP: "192.0.2.1"},
|
||||
metadata: &stubMetadata{supportedRes: true},
|
||||
autoscalingNodeGroups: []string{"someNodeGroup"},
|
||||
wantErr: true,
|
||||
},
|
||||
"cannot get kubeconfig": {
|
||||
cluster: &clusterStub{
|
||||
getKubeconfigErr: someErr,
|
||||
},
|
||||
vpn: &stubVPN{interfaceIP: "192.0.2.1"},
|
||||
metadata: &stubMetadata{supportedRes: true},
|
||||
autoscalingNodeGroups: []string{"someNodeGroup"},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
@ -186,19 +84,17 @@ func TestInitCluster(t *testing.T) {
|
||||
zapLogger, err := zap.NewDevelopment()
|
||||
require.NoError(err)
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(&stubVPN{}, &tc.cluster, &tc.metadata, &tc.cloudControllerManager, &tc.cloudNodeManager, &tc.clusterAutoscaler, nil, zapLogger, simulator.OpenSimulatedTPM, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(tc.vpn, tc.cluster, tc.metadata, nil, zapLogger, simulator.OpenSimulatedTPM, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
|
||||
kubeconfig, err := core.InitCluster(tc.autoscalingNodeGroups, "cloud-service-account-uri", tc.masterSecret)
|
||||
kubeconfig, err := core.InitCluster(context.Background(), tc.autoscalingNodeGroups, "cloud-service-account-uri", tc.masterSecret)
|
||||
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
require.Len(tc.cluster.initInputs, 1)
|
||||
assert.Equal(tc.wantInitClusterInput, tc.cluster.initInputs[0])
|
||||
assert.Equal(tc.cluster.kubeconfig, kubeconfig)
|
||||
assert.Equal(kubeconfigContent, kubeconfig)
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -207,90 +103,38 @@ func TestJoinCluster(t *testing.T) {
|
||||
someErr := errors.New("someErr")
|
||||
|
||||
testCases := map[string]struct {
|
||||
cluster clusterStub
|
||||
metadata stubMetadata
|
||||
cloudControllerManager stubCloudControllerManager
|
||||
cloudNodeManager stubCloudNodeManager
|
||||
clusterAutoscaler stubClusterAutoscaler
|
||||
vpn stubVPN
|
||||
wantErr bool
|
||||
wantJoinClusterArgs joinClusterArgs
|
||||
cluster Cluster
|
||||
metadata ProviderMetadata
|
||||
vpn VPN
|
||||
wantErr bool
|
||||
}{
|
||||
"JoinCluster works": {
|
||||
vpn: stubVPN{
|
||||
vpn: &stubVPN{
|
||||
interfaceIP: "192.0.2.0",
|
||||
},
|
||||
wantJoinClusterArgs: joinClusterArgs{
|
||||
args: &kubeadm.BootstrapTokenDiscovery{
|
||||
APIServerEndpoint: "192.0.2.0:6443",
|
||||
Token: "someToken",
|
||||
CACertHashes: []string{"someHash"},
|
||||
},
|
||||
nodeName: "192.0.2.0",
|
||||
nodeIP: "192.0.2.0",
|
||||
},
|
||||
cluster: &clusterStub{},
|
||||
metadata: &stubMetadata{supportedRes: true},
|
||||
},
|
||||
"JoinCluster fail detected": {
|
||||
cluster: clusterStub{
|
||||
joinErr: someErr,
|
||||
"JoinCluster works even if signal role fails": {
|
||||
vpn: &stubVPN{
|
||||
interfaceIP: "192.0.2.0",
|
||||
},
|
||||
wantErr: true,
|
||||
cluster: &clusterStub{},
|
||||
metadata: &stubMetadata{supportedRes: true, signalRoleErr: someErr},
|
||||
},
|
||||
"retrieving vpn ip failure detected": {
|
||||
vpn: stubVPN{
|
||||
getInterfaceIPErr: errors.New("retrieving interface ip error"),
|
||||
},
|
||||
wantErr: true,
|
||||
"cannot get VPN IP": {
|
||||
vpn: &stubVPN{getInterfaceIPErr: someErr},
|
||||
cluster: &clusterStub{},
|
||||
metadata: &stubMetadata{supportedRes: true},
|
||||
wantErr: true,
|
||||
},
|
||||
"Instance metadata is retrieved": {
|
||||
metadata: stubMetadata{
|
||||
selfRes: Instance{
|
||||
Name: "some-name",
|
||||
ProviderID: "fake://providerid",
|
||||
},
|
||||
supportedRes: true,
|
||||
},
|
||||
wantJoinClusterArgs: joinClusterArgs{
|
||||
args: &kubeadm.BootstrapTokenDiscovery{
|
||||
APIServerEndpoint: "192.0.2.0:6443",
|
||||
Token: "someToken",
|
||||
CACertHashes: []string{"someHash"},
|
||||
},
|
||||
nodeName: "some-name",
|
||||
providerID: "fake://providerid",
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
"Instance metadata retrieval can fail": {
|
||||
metadata: stubMetadata{
|
||||
supportedRes: true,
|
||||
selfErr: errors.New("metadata retrieval error"),
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
"CCM preparation failure is detected": {
|
||||
metadata: stubMetadata{
|
||||
supportedRes: true,
|
||||
},
|
||||
cloudControllerManager: stubCloudControllerManager{
|
||||
supportedRes: true,
|
||||
prepareInstanceRes: errors.New("ccm prepare fails"),
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
"updating role fails without error": {
|
||||
metadata: stubMetadata{
|
||||
signalRoleErr: errors.New("updating role fails"),
|
||||
supportedRes: true,
|
||||
},
|
||||
wantErr: false,
|
||||
wantJoinClusterArgs: joinClusterArgs{
|
||||
args: &kubeadm.BootstrapTokenDiscovery{
|
||||
APIServerEndpoint: "192.0.2.0:6443",
|
||||
Token: "someToken",
|
||||
CACertHashes: []string{"someHash"},
|
||||
},
|
||||
"joining kuberentes fails": {
|
||||
vpn: &stubVPN{
|
||||
interfaceIP: "192.0.2.0",
|
||||
},
|
||||
cluster: &clusterStub{joinErr: someErr},
|
||||
metadata: &stubMetadata{supportedRes: true},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
@ -302,7 +146,7 @@ func TestJoinCluster(t *testing.T) {
|
||||
zapLogger, err := zap.NewDevelopment()
|
||||
require.NoError(err)
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(&tc.vpn, &tc.cluster, &tc.metadata, &tc.cloudControllerManager, &tc.cloudNodeManager, &tc.clusterAutoscaler, nil, zapLogger, simulator.OpenSimulatedTPM, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(tc.vpn, tc.cluster, tc.metadata, nil, zapLogger, simulator.OpenSimulatedTPM, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
|
||||
joinReq := &kubeadm.BootstrapTokenDiscovery{
|
||||
@ -310,47 +154,13 @@ func TestJoinCluster(t *testing.T) {
|
||||
Token: "someToken",
|
||||
CACertHashes: []string{"someHash"},
|
||||
}
|
||||
err = core.JoinCluster(joinReq, "", role.Node)
|
||||
err = core.JoinCluster(context.Background(), joinReq, "", role.Node)
|
||||
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
require.Len(tc.cluster.joinClusterArgs, 1)
|
||||
assert.Equal(tc.wantJoinClusterArgs, tc.cluster.joinClusterArgs[0])
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestK8sCompliantHostname(t *testing.T) {
|
||||
compliantHostname := regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
|
||||
testCases := map[string]struct {
|
||||
hostname string
|
||||
wantHostname string
|
||||
}{
|
||||
"azure scale set names work": {
|
||||
hostname: "constellation-scale-set-coordinators-name_0",
|
||||
wantHostname: "constellation-scale-set-coordinators-name-0",
|
||||
},
|
||||
"compliant hostname is not modified": {
|
||||
hostname: "abcd-123",
|
||||
wantHostname: "abcd-123",
|
||||
},
|
||||
"uppercase hostnames are lowercased": {
|
||||
hostname: "ABCD",
|
||||
wantHostname: "abcd",
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
hostname := k8sCompliantHostname(tc.hostname)
|
||||
|
||||
assert.Equal(tc.wantHostname, hostname)
|
||||
assert.Regexp(compliantHostname, hostname)
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -364,24 +174,20 @@ type clusterStub struct {
|
||||
getJoinTokenErr error
|
||||
startKubeletErr error
|
||||
|
||||
initInputs []kubernetes.InitClusterInput
|
||||
joinClusterArgs []joinClusterArgs
|
||||
inAutoscalingNodeGroups []string
|
||||
inCloudServiceAccountURI string
|
||||
inVpnIP string
|
||||
}
|
||||
|
||||
func (c *clusterStub) InitCluster(in kubernetes.InitClusterInput) error {
|
||||
c.initInputs = append(c.initInputs, in)
|
||||
func (c *clusterStub) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, vpnIP string, masterSecret []byte) error {
|
||||
c.inAutoscalingNodeGroups = autoscalingNodeGroups
|
||||
c.inCloudServiceAccountURI = cloudServiceAccountURI
|
||||
c.inVpnIP = vpnIP
|
||||
|
||||
return c.initErr
|
||||
}
|
||||
|
||||
func (c *clusterStub) JoinCluster(args *kubeadm.BootstrapTokenDiscovery, nodeName, nodeIP, nodeVPNIP, providerID, certKey string, _ bool, _ role.Role) error {
|
||||
c.joinClusterArgs = append(c.joinClusterArgs, joinClusterArgs{
|
||||
args: args,
|
||||
nodeName: nodeName,
|
||||
nodeIP: nodeIP,
|
||||
providerID: providerID,
|
||||
})
|
||||
|
||||
func (c *clusterStub) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, nodeVPNIP string, certKey string, peerRole role.Role) error {
|
||||
return c.joinErr
|
||||
}
|
||||
|
||||
@ -389,149 +195,14 @@ func (c *clusterStub) GetKubeconfig() ([]byte, error) {
|
||||
return c.kubeconfig, c.getKubeconfigErr
|
||||
}
|
||||
|
||||
func (c *clusterStub) GetKubeadmCertificateKey() (string, error) {
|
||||
func (c *clusterStub) GetKubeadmCertificateKey(context.Context) (string, error) {
|
||||
return "dummy", nil
|
||||
}
|
||||
|
||||
func (c *clusterStub) GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
|
||||
func (c *clusterStub) GetJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
|
||||
return c.getJoinTokenResponse, c.getJoinTokenErr
|
||||
}
|
||||
|
||||
func (c *clusterStub) StartKubelet() error {
|
||||
return c.startKubeletErr
|
||||
}
|
||||
|
||||
type prepareInstanceRequest struct {
|
||||
instance Instance
|
||||
vpnIP string
|
||||
}
|
||||
|
||||
type stubCloudControllerManager struct {
|
||||
imageRes string
|
||||
pathRes string
|
||||
nameRes string
|
||||
prepareInstanceRes error
|
||||
extraArgsRes []string
|
||||
configMapsRes resources.ConfigMaps
|
||||
configMapsErr error
|
||||
secretsRes resources.Secrets
|
||||
secretsErr error
|
||||
volumesRes []k8s.Volume
|
||||
volumeMountRes []k8s.VolumeMount
|
||||
envRes []k8s.EnvVar
|
||||
supportedRes bool
|
||||
|
||||
prepareInstanceRequests []prepareInstanceRequest
|
||||
}
|
||||
|
||||
func (s *stubCloudControllerManager) Image() string {
|
||||
return s.imageRes
|
||||
}
|
||||
|
||||
func (s *stubCloudControllerManager) Path() string {
|
||||
return s.pathRes
|
||||
}
|
||||
|
||||
func (s *stubCloudControllerManager) Name() string {
|
||||
return s.nameRes
|
||||
}
|
||||
|
||||
func (s *stubCloudControllerManager) PrepareInstance(instance Instance, vpnIP string) error {
|
||||
s.prepareInstanceRequests = append(s.prepareInstanceRequests, prepareInstanceRequest{
|
||||
instance: instance,
|
||||
vpnIP: vpnIP,
|
||||
})
|
||||
return s.prepareInstanceRes
|
||||
}
|
||||
|
||||
func (s *stubCloudControllerManager) ExtraArgs() []string {
|
||||
return s.extraArgsRes
|
||||
}
|
||||
|
||||
func (s *stubCloudControllerManager) ConfigMaps(instance Instance) (resources.ConfigMaps, error) {
|
||||
return s.configMapsRes, s.configMapsErr
|
||||
}
|
||||
|
||||
func (s *stubCloudControllerManager) Secrets(instance Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
|
||||
return s.secretsRes, s.secretsErr
|
||||
}
|
||||
|
||||
func (s *stubCloudControllerManager) Volumes() []k8s.Volume {
|
||||
return s.volumesRes
|
||||
}
|
||||
|
||||
func (s *stubCloudControllerManager) VolumeMounts() []k8s.VolumeMount {
|
||||
return s.volumeMountRes
|
||||
}
|
||||
|
||||
func (s *stubCloudControllerManager) Env() []k8s.EnvVar {
|
||||
return s.envRes
|
||||
}
|
||||
|
||||
func (s *stubCloudControllerManager) Supported() bool {
|
||||
return s.supportedRes
|
||||
}
|
||||
|
||||
type stubCloudNodeManager struct {
|
||||
imageRes string
|
||||
pathRes string
|
||||
extraArgsRes []string
|
||||
supportedRes bool
|
||||
}
|
||||
|
||||
func (s *stubCloudNodeManager) Image() string {
|
||||
return s.imageRes
|
||||
}
|
||||
|
||||
func (s *stubCloudNodeManager) Path() string {
|
||||
return s.pathRes
|
||||
}
|
||||
|
||||
func (s *stubCloudNodeManager) ExtraArgs() []string {
|
||||
return s.extraArgsRes
|
||||
}
|
||||
|
||||
func (s *stubCloudNodeManager) Supported() bool {
|
||||
return s.supportedRes
|
||||
}
|
||||
|
||||
type stubClusterAutoscaler struct {
|
||||
nameRes string
|
||||
supportedRes bool
|
||||
secretsRes resources.Secrets
|
||||
secretsErr error
|
||||
volumesRes []k8s.Volume
|
||||
volumeMountRes []k8s.VolumeMount
|
||||
envRes []k8s.EnvVar
|
||||
}
|
||||
|
||||
func (s *stubClusterAutoscaler) Name() string {
|
||||
return s.nameRes
|
||||
}
|
||||
|
||||
func (s *stubClusterAutoscaler) Secrets(instance Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
|
||||
return s.secretsRes, s.secretsErr
|
||||
}
|
||||
|
||||
func (s *stubClusterAutoscaler) Volumes() []k8s.Volume {
|
||||
return s.volumesRes
|
||||
}
|
||||
|
||||
func (s *stubClusterAutoscaler) VolumeMounts() []k8s.VolumeMount {
|
||||
return s.volumeMountRes
|
||||
}
|
||||
|
||||
func (s *stubClusterAutoscaler) Env() []k8s.EnvVar {
|
||||
return s.envRes
|
||||
}
|
||||
|
||||
func (s *stubClusterAutoscaler) Supported() bool {
|
||||
return s.supportedRes
|
||||
}
|
||||
|
||||
type joinClusterArgs struct {
|
||||
args *kubeadm.BootstrapTokenDiscovery
|
||||
nodeName string
|
||||
nodeIP string
|
||||
providerID string
|
||||
}
|
||||
|
@ -5,7 +5,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@ -25,8 +24,6 @@ import (
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
var coordinatorVPNIP = netip.AddrFrom4([4]byte{10, 118, 0, 1})
|
||||
|
||||
type Core struct {
|
||||
state state.State
|
||||
openTPM vtpm.TPMOpenFunc
|
||||
@ -35,9 +32,6 @@ type Core struct {
|
||||
vpn VPN
|
||||
kube Cluster
|
||||
metadata ProviderMetadata
|
||||
cloudControllerManager CloudControllerManager
|
||||
cloudNodeManager CloudNodeManager
|
||||
clusterAutoscaler ClusterAutoscaler
|
||||
encryptedDisk EncryptedDisk
|
||||
kms kms.CloudKMS
|
||||
zaplogger *zap.Logger
|
||||
@ -50,8 +44,7 @@ type Core struct {
|
||||
|
||||
// NewCore creates and initializes a new Core object.
|
||||
func NewCore(vpn VPN, kube Cluster,
|
||||
metadata ProviderMetadata, cloudControllerManager CloudControllerManager, cloudNodeManager CloudNodeManager, clusterAutoscaler ClusterAutoscaler,
|
||||
encryptedDisk EncryptedDisk, zapLogger *zap.Logger, openTPM vtpm.TPMOpenFunc, persistentStoreFactory PersistentStoreFactory, fileHandler file.Handler, linuxUserManager user.LinuxUserManager,
|
||||
metadata ProviderMetadata, encryptedDisk EncryptedDisk, zapLogger *zap.Logger, openTPM vtpm.TPMOpenFunc, persistentStoreFactory PersistentStoreFactory, fileHandler file.Handler, linuxUserManager user.LinuxUserManager,
|
||||
) (*Core, error) {
|
||||
stor := store.NewStdStore()
|
||||
c := &Core{
|
||||
@ -60,9 +53,6 @@ func NewCore(vpn VPN, kube Cluster,
|
||||
vpn: vpn,
|
||||
kube: kube,
|
||||
metadata: metadata,
|
||||
cloudNodeManager: cloudNodeManager,
|
||||
cloudControllerManager: cloudControllerManager,
|
||||
clusterAutoscaler: clusterAutoscaler,
|
||||
encryptedDisk: encryptedDisk,
|
||||
zaplogger: zapLogger,
|
||||
kms: nil, // KMS is set up during init phase
|
||||
|
@ -38,7 +38,7 @@ func TestGetNextNodeIP(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
require.NoError(core.InitializeStoreIPs())
|
||||
|
||||
@ -82,7 +82,7 @@ func TestSwitchToPersistentStore(t *testing.T) {
|
||||
|
||||
storeFactory := &fakeStoreFactory{}
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, nil, nil, nil, zaptest.NewLogger(t), nil, storeFactory, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, zaptest.NewLogger(t), nil, storeFactory, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(core.store.Put("test", []byte("test")))
|
||||
require.NoError(err)
|
||||
|
||||
@ -97,7 +97,7 @@ func TestGetIDs(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
|
||||
_, _, err = core.GetIDs(nil)
|
||||
@ -122,7 +122,7 @@ func TestNotifyNodeHeartbeat(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
|
||||
const ip = "192.0.2.1"
|
||||
@ -136,7 +136,7 @@ func TestDeriveKey(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
|
||||
// error when no kms is set up
|
||||
@ -214,7 +214,7 @@ func TestInitialize(t *testing.T) {
|
||||
VPNPrivKey: []byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7},
|
||||
}).ToFile(fileHandler))
|
||||
}
|
||||
core, err := NewCore(&stubVPN{}, &clusterStub{}, &ProviderMetadataFake{}, nil, nil, nil, nil, zaptest.NewLogger(t), openTPM, &fakeStoreFactory{}, fileHandler, user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(&stubVPN{}, &clusterStub{}, &ProviderMetadataFake{}, nil, zaptest.NewLogger(t), openTPM, &fakeStoreFactory{}, fileHandler, user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
core.initialVPNPeersRetriever = fakeInitializeVPNPeersRetriever
|
||||
// prepare store to emulate initialized KMS
|
||||
@ -272,7 +272,7 @@ func TestPersistNodeState(t *testing.T) {
|
||||
require.NoError(err)
|
||||
require.NoError(file.Close())
|
||||
}
|
||||
core, err := NewCore(tc.vpn, nil, nil, nil, nil, nil, nil, zaptest.NewLogger(t), nil, nil, fileHandler, user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(tc.vpn, nil, nil, nil, zaptest.NewLogger(t), nil, nil, fileHandler, user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
err = core.PersistNodeState(role.Coordinator, "192.0.2.1", []byte("owner-id"), []byte("cluster-id"))
|
||||
if tc.wantErr {
|
||||
|
@ -45,7 +45,7 @@ func TestGetDiskUUID(t *testing.T) {
|
||||
uuid: tc.wantUUID,
|
||||
}
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, nil, nil, &diskStub, zapLogger, nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, &diskStub, zapLogger, nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
uuid, err := core.GetDiskUUID()
|
||||
if tc.wantErr {
|
||||
@ -88,7 +88,7 @@ func TestUpdateDiskPassphrase(t *testing.T) {
|
||||
updatePassphraseErr: tc.updatePassphraseErr,
|
||||
}
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, nil, nil, &diskStub, zapLogger, nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, &diskStub, zapLogger, nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
err = core.UpdateDiskPassphrase("passphrase")
|
||||
if tc.wantErr {
|
||||
|
@ -124,16 +124,13 @@ func newMockCoreWithDialer(bufDialer *bufconnDialer) (*Core, *pubapi.API, error)
|
||||
vpn := &stubVPN{}
|
||||
kubeFake := &ClusterFake{}
|
||||
metadataFake := &ProviderMetadataFake{}
|
||||
ccmFake := &CloudControllerManagerFake{}
|
||||
cnmFake := &CloudNodeManagerFake{}
|
||||
autoscalerFake := &ClusterAutoscalerFake{}
|
||||
encryptedDiskFake := &EncryptedDiskFake{}
|
||||
|
||||
getPublicAddr := func() (string, error) {
|
||||
return "192.0.2.1", nil
|
||||
}
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(vpn, kubeFake, metadataFake, ccmFake, cnmFake, autoscalerFake, encryptedDiskFake, zapLogger, simulator.OpenSimulatedTPM, &fakeStoreFactory{}, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(vpn, kubeFake, metadataFake, encryptedDiskFake, zapLogger, simulator.OpenSimulatedTPM, &fakeStoreFactory{}, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -55,7 +55,7 @@ func TestGetPeers(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
|
||||
// prepare store
|
||||
@ -116,7 +116,7 @@ func TestAddPeer(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(&tc.vpn, nil, nil, nil, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(&tc.vpn, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
|
||||
err = core.AddPeer(tc.peer)
|
||||
|
@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/atls"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/peer"
|
||||
"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
@ -71,7 +72,7 @@ func TestReinitializeAsNode(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
coordinators := []Instance{{IPs: []string{"192.0.2.1"}, Role: role.Coordinator}}
|
||||
coordinators := []cloudtypes.Instance{{PrivateIPs: []string{"192.0.2.1"}, Role: role.Coordinator}}
|
||||
netDialer := testdialer.NewBufconnDialer()
|
||||
dialer := grpcutil.NewDialer(&MockValidator{}, netDialer)
|
||||
server := newPubAPIServer()
|
||||
@ -81,7 +82,7 @@ func TestReinitializeAsNode(t *testing.T) {
|
||||
defer server.Stop()
|
||||
vpn := &stubVPN{}
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(vpn, nil, &stubMetadata{listRes: coordinators, supportedRes: true}, nil, nil, nil, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(vpn, nil, &stubMetadata{listRes: coordinators, supportedRes: true}, nil, zaptest.NewLogger(t), nil, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
err = core.ReinitializeAsNode(context.Background(), dialer, vpnIP, &stubPubAPI{}, 0)
|
||||
|
||||
@ -144,7 +145,7 @@ func TestReinitializeAsCoordinator(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
coordinators := []Instance{{IPs: []string{"192.0.2.1"}, Role: role.Coordinator}}
|
||||
coordinators := []cloudtypes.Instance{{PrivateIPs: []string{"192.0.2.1"}, Role: role.Coordinator}}
|
||||
netDialer := testdialer.NewBufconnDialer()
|
||||
dialer := grpcutil.NewDialer(&MockValidator{}, netDialer)
|
||||
server := newPubAPIServer()
|
||||
@ -154,7 +155,7 @@ func TestReinitializeAsCoordinator(t *testing.T) {
|
||||
defer server.Stop()
|
||||
vpn := &stubVPN{}
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(vpn, nil, &stubMetadata{listRes: coordinators, supportedRes: true}, nil, nil, nil, nil, zaptest.NewLogger(t), nil, &fakeStoreFactory{}, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(vpn, nil, &stubMetadata{listRes: coordinators, supportedRes: true}, nil, zaptest.NewLogger(t), nil, &fakeStoreFactory{}, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
// prepare store to emulate initialized KMS
|
||||
require.NoError(core.data().PutKMSData(kms.KMSInformation{StorageUri: kms.NoStoreURI, KmsUri: kms.ClusterKMSURI}))
|
||||
@ -224,10 +225,10 @@ func TestGetInitialVPNPeers(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
coordinators := func(ips []string) []Instance {
|
||||
instances := []Instance{}
|
||||
coordinators := func(ips []string) []cloudtypes.Instance {
|
||||
instances := []cloudtypes.Instance{}
|
||||
for _, ip := range ips {
|
||||
instances = append(instances, Instance{IPs: []string{ip}, Role: role.Coordinator})
|
||||
instances = append(instances, cloudtypes.Instance{PrivateIPs: []string{ip}, Role: role.Coordinator})
|
||||
}
|
||||
return instances
|
||||
}(tc.coordinatorIPs)
|
||||
|
@ -67,7 +67,7 @@ func TestAdvanceState(t *testing.T) {
|
||||
}
|
||||
|
||||
fs := afero.NewMemMapFs()
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, nil, nil, nil, zaptest.NewLogger(t), openTPM, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
core, err := NewCore(&stubVPN{}, nil, nil, nil, zaptest.NewLogger(t), openTPM, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
|
||||
require.NoError(err)
|
||||
assert.Equal(state.Uninitialized, core.GetState())
|
||||
core.state = tc.initialState
|
||||
|
coordinator/kubernetes/cloud_provider.go (new file, 246 lines)
@ -0,0 +1,246 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
k8s "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// ProviderMetadata implementers read/write cloud provider metadata.
|
||||
type ProviderMetadata interface {
|
||||
// List retrieves all instances belonging to the current Constellation.
|
||||
List(ctx context.Context) ([]cloudtypes.Instance, error)
|
||||
// Self retrieves the current instance.
|
||||
Self(ctx context.Context) (cloudtypes.Instance, error)
|
||||
// GetSubnetworkCIDR retrieves the subnetwork CIDR for the current instance.
|
||||
GetSubnetworkCIDR(ctx context.Context) (string, error)
|
||||
// SupportsLoadBalancer returns true if the cloud provider supports load balancers.
|
||||
SupportsLoadBalancer() bool
|
||||
// GetLoadBalancerIP retrieves the load balancer IP.
|
||||
GetLoadBalancerIP(ctx context.Context) (string, error)
|
||||
// GetInstance retrieves an instance using its providerID.
|
||||
GetInstance(ctx context.Context, providerID string) (cloudtypes.Instance, error)
|
||||
// SignalRole signals the constellation role via cloud provider metadata (if supported by the CSP and deployment type, otherwise does nothing).
|
||||
SignalRole(ctx context.Context, role role.Role) error
|
||||
// SetVPNIP stores the internally used VPN IP in cloud provider metadata (if supported and required for autoscaling by the CSP, otherwise does nothing).
|
||||
SetVPNIP(ctx context.Context, vpnIP string) error
|
||||
// Supported is used to determine if metadata API is implemented for this cloud provider.
|
||||
Supported() bool
|
||||
}
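The new load balancer methods are meant to be used together: check SupportsLoadBalancer before querying the IP. A small sketch (the helper below is an illustrative assumption, not part of the diff) of resolving an optional extra API server SAN from metadata:

// loadBalancerSAN returns the load balancer IP to use as an additional SAN, or "" if none is available.
func loadBalancerSAN(ctx context.Context, metadata ProviderMetadata) (string, error) {
	if !metadata.Supported() || !metadata.SupportsLoadBalancer() {
		return "", nil
	}
	return metadata.GetLoadBalancerIP(ctx)
}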
|
||||
|
||||
// CloudControllerManager implementers provide configuration for the k8s cloud-controller-manager.
|
||||
type CloudControllerManager interface {
|
||||
// Image returns the container image used to provide cloud-controller-manager for the cloud-provider.
|
||||
Image() string
|
||||
// Path returns the path used by cloud-controller-manager executable within the container image.
|
||||
Path() string
|
||||
// Name returns the cloud-provider name as used by k8s cloud-controller-manager (k8s.gcr.io/cloud-controller-manager).
|
||||
Name() string
|
||||
// ExtraArgs returns a list of arguments to append to the cloud-controller-manager command.
|
||||
ExtraArgs() []string
|
||||
// ConfigMaps returns a list of ConfigMaps to deploy together with the k8s cloud-controller-manager
|
||||
// Reference: https://kubernetes.io/docs/concepts/configuration/configmap/ .
|
||||
ConfigMaps(instance cloudtypes.Instance) (resources.ConfigMaps, error)
|
||||
// Secrets returns a list of secrets to deploy together with the k8s cloud-controller-manager.
|
||||
// Reference: https://kubernetes.io/docs/concepts/configuration/secret/ .
|
||||
Secrets(ctx context.Context, instance cloudtypes.Instance, cloudServiceAccountURI string) (resources.Secrets, error)
|
||||
// Volumes returns a list of volumes to deploy together with the k8s cloud-controller-manager.
|
||||
// Reference: https://kubernetes.io/docs/concepts/storage/volumes/ .
|
||||
Volumes() []k8s.Volume
|
||||
// VolumeMounts returns a list of volume mounts to deploy together with the k8s cloud-controller-manager.
|
||||
VolumeMounts() []k8s.VolumeMount
|
||||
// Env returns a list of k8s environment key-value pairs to deploy together with the k8s cloud-controller-manager.
|
||||
Env() []k8s.EnvVar
|
||||
// Supported is used to determine if cloud controller manager is implemented for this cloud provider.
|
||||
Supported() bool
|
||||
}
|
||||
|
||||
// CloudNodeManager implementers provide configuration for the k8s cloud-node-manager.
|
||||
type CloudNodeManager interface {
|
||||
// Image returns the container image used to provide cloud-node-manager for the cloud-provider.
|
||||
Image() string
|
||||
// Path returns the path used by cloud-node-manager executable within the container image.
|
||||
Path() string
|
||||
// ExtraArgs returns a list of arguments to append to the cloud-node-manager command.
|
||||
ExtraArgs() []string
|
||||
// Supported is used to determine if cloud node manager is implemented for this cloud provider.
|
||||
Supported() bool
|
||||
}
|
||||
|
||||
// ClusterAutoscaler implementers provide configuration for the k8s cluster-autoscaler.
|
||||
type ClusterAutoscaler interface {
|
||||
// Name returns the cloud-provider name as used by k8s cluster-autoscaler.
|
||||
Name() string
|
||||
// Secrets returns a list of secrets to deploy together with the k8s cluster-autoscaler.
|
||||
Secrets(instance cloudtypes.Instance, cloudServiceAccountURI string) (resources.Secrets, error)
|
||||
// Volumes returns a list of volumes to deploy together with the k8s cluster-autoscaler.
|
||||
Volumes() []k8s.Volume
|
||||
// VolumeMounts returns a list of volume mounts to deploy together with the k8s cluster-autoscaler.
|
||||
VolumeMounts() []k8s.VolumeMount
|
||||
// Env returns a list of k8s environment key-value pairs to deploy together with the k8s cluster-autoscaler.
|
||||
Env() []k8s.EnvVar
|
||||
// Supported is used to determine if cluster autoscaler is implemented for this cloud provider.
|
||||
Supported() bool
|
||||
}
|
||||
|
||||
type stubProviderMetadata struct {
|
||||
GetLoadBalancerIPErr error
|
||||
GetLoadBalancerIPResp string
|
||||
|
||||
GetSubnetworkCIDRErr error
|
||||
GetSubnetworkCIDRResp string
|
||||
|
||||
ListErr error
|
||||
ListResp []cloudtypes.Instance
|
||||
|
||||
SignalRoleErr error
|
||||
SetVPNIPErr error
|
||||
|
||||
SelfErr error
|
||||
SelfResp cloudtypes.Instance
|
||||
|
||||
GetInstanceErr error
|
||||
GetInstanceResp cloudtypes.Instance
|
||||
|
||||
SupportedResp bool
|
||||
SupportsLoadBalancerResp bool
|
||||
}
|
||||
|
||||
func (m *stubProviderMetadata) GetLoadBalancerIP(ctx context.Context) (string, error) {
|
||||
return m.GetLoadBalancerIPResp, m.GetLoadBalancerIPErr
|
||||
}
|
||||
|
||||
func (m *stubProviderMetadata) GetSubnetworkCIDR(ctx context.Context) (string, error) {
|
||||
return m.GetSubnetworkCIDRResp, m.GetSubnetworkCIDRErr
|
||||
}
|
||||
|
||||
func (m *stubProviderMetadata) List(ctx context.Context) ([]cloudtypes.Instance, error) {
|
||||
return m.ListResp, m.ListErr
|
||||
}
|
||||
|
||||
func (m *stubProviderMetadata) Self(ctx context.Context) (cloudtypes.Instance, error) {
|
||||
return m.SelfResp, m.SelfErr
|
||||
}
|
||||
|
||||
func (m *stubProviderMetadata) GetInstance(ctx context.Context, providerID string) (cloudtypes.Instance, error) {
|
||||
return m.GetInstanceResp, m.GetInstanceErr
|
||||
}
|
||||
|
||||
func (m *stubProviderMetadata) SignalRole(ctx context.Context, role role.Role) error {
|
||||
return m.SignalRoleErr
|
||||
}
|
||||
|
||||
func (m *stubProviderMetadata) SetVPNIP(ctx context.Context, vpnIP string) error {
|
||||
return m.SetVPNIPErr
|
||||
}
|
||||
|
||||
func (m *stubProviderMetadata) Supported() bool {
|
||||
return m.SupportedResp
|
||||
}
|
||||
|
||||
func (m *stubProviderMetadata) SupportsLoadBalancer() bool {
|
||||
return m.SupportsLoadBalancerResp
|
||||
}
|
||||
|
||||
type stubCloudControllerManager struct {
|
||||
SupportedResp bool
|
||||
}
|
||||
|
||||
func (m *stubCloudControllerManager) Image() string {
|
||||
return "stub-image:latest"
|
||||
}
|
||||
|
||||
func (m *stubCloudControllerManager) Path() string {
|
||||
return "/stub-controller-manager"
|
||||
}
|
||||
|
||||
func (m *stubCloudControllerManager) Name() string {
|
||||
return "stub"
|
||||
}
|
||||
|
||||
func (m *stubCloudControllerManager) ExtraArgs() []string {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func (m *stubCloudControllerManager) ConfigMaps(instance cloudtypes.Instance) (resources.ConfigMaps, error) {
|
||||
return []*k8s.ConfigMap{}, nil
|
||||
}
|
||||
|
||||
func (m *stubCloudControllerManager) Secrets(ctx context.Context, instance cloudtypes.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
|
||||
return []*k8s.Secret{}, nil
|
||||
}
|
||||
|
||||
func (m *stubCloudControllerManager) Volumes() []k8s.Volume {
|
||||
return []k8s.Volume{}
|
||||
}
|
||||
|
||||
func (m *stubCloudControllerManager) VolumeMounts() []k8s.VolumeMount {
|
||||
return []k8s.VolumeMount{}
|
||||
}
|
||||
|
||||
func (m *stubCloudControllerManager) Env() []k8s.EnvVar {
|
||||
return []k8s.EnvVar{}
|
||||
}
|
||||
|
||||
func (m *stubCloudControllerManager) Supported() bool {
|
||||
return m.SupportedResp
|
||||
}
|
||||
|
||||
type stubCloudNodeManager struct {
|
||||
SupportedResp bool
|
||||
|
||||
ImageResp string
|
||||
PathResp string
|
||||
ExtraArgsResp []string
|
||||
}
|
||||
|
||||
func (m *stubCloudNodeManager) Image() string {
|
||||
return m.ImageResp
|
||||
}
|
||||
|
||||
func (m *stubCloudNodeManager) Path() string {
|
||||
return m.PathResp
|
||||
}
|
||||
|
||||
func (m *stubCloudNodeManager) ExtraArgs() []string {
|
||||
return m.ExtraArgsResp
|
||||
}
|
||||
|
||||
func (m *stubCloudNodeManager) Supported() bool {
|
||||
return m.SupportedResp
|
||||
}
|
||||
|
||||
type stubClusterAutoscaler struct {
|
||||
SupportedResp bool
|
||||
}
|
||||
|
||||
func (a *stubClusterAutoscaler) Name() string {
|
||||
return "stub"
|
||||
}
|
||||
|
||||
// Secrets returns a list of secrets to deploy together with the k8s cluster-autoscaler.
|
||||
func (a *stubClusterAutoscaler) Secrets(instance cloudtypes.Instance, cloudServiceAccountURI string) (resources.Secrets, error) {
|
||||
return resources.Secrets{}, nil
|
||||
}
|
||||
|
||||
// Volumes returns a list of volumes to deploy together with the k8s cluster-autoscaler.
|
||||
func (a *stubClusterAutoscaler) Volumes() []k8s.Volume {
|
||||
return []k8s.Volume{}
|
||||
}
|
||||
|
||||
// VolumeMounts returns a list of volume mounts to deploy together with the k8s cluster-autoscaler.
|
||||
func (a *stubClusterAutoscaler) VolumeMounts() []k8s.VolumeMount {
|
||||
return []k8s.VolumeMount{}
|
||||
}
|
||||
|
||||
// Env returns a list of k8s environment key-value pairs to deploy together with the k8s cluster-autoscaler.
|
||||
func (a *stubClusterAutoscaler) Env() []k8s.EnvVar {
|
||||
return []k8s.EnvVar{}
|
||||
}
|
||||
|
||||
func (a *stubClusterAutoscaler) Supported() bool {
|
||||
return a.SupportedResp
|
||||
}
|
@ -1,36 +0,0 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
|
||||
k8s "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// InitClusterInput collects the arguments to initialize a new cluster.
|
||||
type InitClusterInput struct {
|
||||
APIServerAdvertiseIP string
|
||||
NodeIP string
|
||||
NodeName string
|
||||
ProviderID string
|
||||
SupportClusterAutoscaler bool
|
||||
AutoscalingCloudprovider string
|
||||
AutoscalingNodeGroups []string
|
||||
AutoscalingSecrets resources.Secrets
|
||||
AutoscalingVolumes []k8s.Volume
|
||||
AutoscalingVolumeMounts []k8s.VolumeMount
|
||||
AutoscalingEnv []k8s.EnvVar
|
||||
SupportsCloudControllerManager bool
|
||||
CloudControllerManagerName string
|
||||
CloudControllerManagerImage string
|
||||
CloudControllerManagerPath string
|
||||
CloudControllerManagerExtraArgs []string
|
||||
CloudControllerManagerConfigMaps resources.ConfigMaps
|
||||
CloudControllerManagerSecrets resources.Secrets
|
||||
CloudControllerManagerVolumes []k8s.Volume
|
||||
CloudControllerManagerVolumeMounts []k8s.VolumeMount
|
||||
CloudControllerManagerEnv []k8s.EnvVar
|
||||
SupportsCloudNodeManager bool
|
||||
CloudNodeManagerImage string
|
||||
CloudNodeManagerPath string
|
||||
CloudNodeManagerExtraArgs []string
|
||||
MasterSecret []byte
|
||||
}
|
@ -104,7 +104,6 @@ func (c *CoreOSConfiguration) InitConfiguration(externalCloudProvider bool) Kube
|
||||
"profiling": "false",
|
||||
},
|
||||
},
|
||||
ControlPlaneEndpoint: "127.0.0.1:16443",
|
||||
},
|
||||
// warning: this config is applied to every node in the cluster!
|
||||
KubeletConfiguration: kubeletconf.KubeletConfiguration{
|
||||
@ -123,6 +122,18 @@ func (c *CoreOSConfiguration) InitConfiguration(externalCloudProvider bool) Kube
|
||||
APIVersion: kubeletconf.SchemeGroupVersion.String(),
|
||||
Kind: "KubeletConfiguration",
|
||||
},
|
||||
RegisterWithTaints: []corev1.Taint{
|
||||
{
|
||||
Key: "node.cloudprovider.kubernetes.io/uninitialized",
|
||||
Value: "true",
|
||||
Effect: corev1.TaintEffectPreferNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "node.cilium.io/agent-not-ready",
|
||||
Value: "true",
|
||||
Effect: corev1.TaintEffectPreferNoSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
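The two taints registered above keep workloads off a node until the cloud-controller-manager has initialized it and the Cilium agent reports ready. As a standalone sketch of the same taint list (package main, the YAML marshaling step, and the import choice are illustrative assumptions):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	// Same taints as passed via RegisterWithTaints in the kubelet configuration above.
	taints := []corev1.Taint{
		{Key: "node.cloudprovider.kubernetes.io/uninitialized", Value: "true", Effect: corev1.TaintEffectPreferNoSchedule},
		{Key: "node.cilium.io/agent-not-ready", Value: "true", Effect: corev1.TaintEffectPreferNoSchedule},
	}
	out, err := yaml.Marshal(taints) // rendered into the kubelet's registerWithTaints field
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}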
|
||||
@ -157,63 +168,6 @@ func (c *CoreOSConfiguration) JoinConfiguration(externalCloudProvider bool) Kube
|
||||
}
|
||||
}
|
||||
|
||||
type AWSConfiguration struct{}
|
||||
|
||||
func (a *AWSConfiguration) InitConfiguration() KubeadmInitYAML {
|
||||
return KubeadmInitYAML{
|
||||
InitConfiguration: kubeadm.InitConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: kubeadm.SchemeGroupVersion.String(),
|
||||
Kind: "InitConfiguration",
|
||||
},
|
||||
NodeRegistration: kubeadm.NodeRegistrationOptions{
|
||||
CRISocket: "/run/containerd/containerd.sock",
|
||||
IgnorePreflightErrors: []string{"SystemVerification"},
|
||||
},
|
||||
LocalAPIEndpoint: kubeadm.APIEndpoint{BindPort: bindPort},
|
||||
},
|
||||
ClusterConfiguration: kubeadm.ClusterConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: kubeadm.SchemeGroupVersion.String(),
|
||||
Kind: "ClusterConfiguration",
|
||||
},
|
||||
APIServer: kubeadm.APIServer{
|
||||
CertSANs: []string{"10.118.0.1"},
|
||||
},
|
||||
},
|
||||
KubeletConfiguration: kubeletconf.KubeletConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: kubeletconf.SchemeGroupVersion.String(),
|
||||
Kind: "KubeletConfiguration",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (a *AWSConfiguration) JoinConfiguration() KubeadmJoinYAML {
|
||||
return KubeadmJoinYAML{
|
||||
JoinConfiguration: kubeadm.JoinConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: kubeadm.SchemeGroupVersion.String(),
|
||||
Kind: "JoinConfiguration",
|
||||
},
|
||||
NodeRegistration: kubeadm.NodeRegistrationOptions{
|
||||
CRISocket: "/run/containerd/containerd.sock",
|
||||
IgnorePreflightErrors: []string{"SystemVerification"},
|
||||
},
|
||||
Discovery: kubeadm.Discovery{
|
||||
BootstrapToken: &kubeadm.BootstrapTokenDiscovery{},
|
||||
},
|
||||
},
|
||||
KubeletConfiguration: kubeletconf.KubeletConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: kubeletconf.SchemeGroupVersion.String(),
|
||||
Kind: "KubeletConfiguration",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type KubeadmJoinYAML struct {
|
||||
JoinConfiguration kubeadm.JoinConfiguration
|
||||
KubeletConfiguration kubeletconf.KubeletConfiguration
|
||||
@ -276,10 +230,27 @@ func (k *KubeadmInitYAML) SetNodeName(nodeName string) {
|
||||
k.InitConfiguration.NodeRegistration.Name = nodeName
|
||||
}
|
||||
|
||||
// SetCertSANs sets the SANs for the certificate.
|
||||
func (k *KubeadmInitYAML) SetCertSANs(certSANs []string) {
|
||||
for _, certSAN := range certSANs {
|
||||
if certSAN == "" {
|
||||
continue
|
||||
}
|
||||
k.ClusterConfiguration.APIServer.CertSANs = append(k.ClusterConfiguration.APIServer.CertSANs, certSAN)
|
||||
}
|
||||
}
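A brief usage sketch of the new helper (the wrapper function and the load balancer IP parameter are illustrative assumptions): empty entries are skipped, so an optional SAN can be passed through unconditionally.

// exampleInitConfig shows how optional SANs flow into the API server certificate.
func exampleInitConfig(loadBalancerIP string) KubeadmInitYAML {
	cfg := (&CoreOSConfiguration{}).InitConfiguration(true)
	// An empty loadBalancerIP is silently dropped by SetCertSANs.
	cfg.SetCertSANs([]string{"10.118.0.1", loadBalancerIP})
	return cfg
}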
|
||||
|
||||
func (k *KubeadmInitYAML) SetApiServerAdvertiseAddress(apiServerAdvertiseAddress string) {
|
||||
k.InitConfiguration.LocalAPIEndpoint.AdvertiseAddress = apiServerAdvertiseAddress
|
||||
}
|
||||
|
||||
// SetControlPlaneEndpoint sets the control plane endpoint if controlPlaneEndpoint is not empty.
|
||||
func (k *KubeadmInitYAML) SetControlPlaneEndpoint(controlPlaneEndpoint string) {
|
||||
if controlPlaneEndpoint != "" {
|
||||
k.ClusterConfiguration.ControlPlaneEndpoint = controlPlaneEndpoint
|
||||
}
|
||||
}
|
||||
|
||||
func (k *KubeadmInitYAML) SetServiceCIDR(serviceCIDR string) {
|
||||
k.ClusterConfiguration.Networking.ServiceSubnet = serviceCIDR
|
||||
}
|
||||
|
@ -13,27 +13,11 @@ func TestMain(m *testing.M) {
|
||||
}
|
||||
|
||||
func TestInitConfiguration(t *testing.T) {
|
||||
awsConfig := AWSConfiguration{}
|
||||
coreOSConfig := CoreOSConfiguration{}
|
||||
|
||||
testCases := map[string]struct {
|
||||
config KubeadmInitYAML
|
||||
}{
|
||||
"AWS init config can be created": {
|
||||
config: awsConfig.InitConfiguration(),
|
||||
},
|
||||
"AWS init config with all fields can be created": {
|
||||
config: func() KubeadmInitYAML {
|
||||
c := awsConfig.InitConfiguration()
|
||||
c.SetApiServerAdvertiseAddress("192.0.2.0")
|
||||
c.SetNodeIP("192.0.2.0")
|
||||
c.SetNodeName("node")
|
||||
c.SetPodNetworkCIDR("10.244.0.0/16")
|
||||
c.SetServiceCIDR("10.245.0.0/24")
|
||||
c.SetProviderID("somecloudprovider://instance-id")
|
||||
return c
|
||||
}(),
|
||||
},
|
||||
"CoreOS init config can be created": {
|
||||
config: coreOSConfig.InitConfiguration(true),
|
||||
},
|
||||
@ -67,27 +51,11 @@ func TestInitConfiguration(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestJoinConfiguration(t *testing.T) {
|
||||
awsConfig := AWSConfiguration{}
|
||||
coreOSConfig := CoreOSConfiguration{}
|
||||
|
||||
testCases := map[string]struct {
|
||||
config KubeadmJoinYAML
|
||||
}{
|
||||
"AWS join config can be created": {
|
||||
config: awsConfig.JoinConfiguration(),
|
||||
},
|
||||
"AWS join config with all fields can be created": {
|
||||
config: func() KubeadmJoinYAML {
|
||||
c := awsConfig.JoinConfiguration()
|
||||
c.SetApiServerEndpoint("192.0.2.0:6443")
|
||||
c.SetNodeIP("192.0.2.0")
|
||||
c.SetNodeName("node")
|
||||
c.SetToken("token")
|
||||
c.AppendDiscoveryTokenCaCertHash("discovery-token-ca-cert-hash")
|
||||
c.SetProviderID("somecloudprovider://instance-id")
|
||||
return c
|
||||
}(),
|
||||
},
|
||||
"CoreOS join config can be created": {
|
||||
config: coreOSConfig.JoinConfiguration(true),
|
||||
},
|
||||
|
@ -239,18 +239,13 @@ func TestGetObjects(t *testing.T) {
|
||||
resourcesYAML string
|
||||
wantErr bool
|
||||
}{
|
||||
"GetObjects works on flannel deployment": {
|
||||
wantResources: resources.NewDefaultFlannelDeployment(),
|
||||
resourcesYAML: string(nginxDeplYAML),
|
||||
wantErr: false,
|
||||
},
|
||||
"GetObjects works on cluster-autoscaler deployment": {
|
||||
wantResources: resources.NewDefaultFlannelDeployment(),
|
||||
wantResources: resources.NewDefaultAutoscalerDeployment(nil, nil, nil),
|
||||
resourcesYAML: string(nginxDeplYAML),
|
||||
wantErr: false,
|
||||
},
|
||||
"GetObjects works on cloud-controller-manager deployment": {
|
||||
wantResources: resources.NewDefaultCloudControllerManagerDeployment("someProvider", "someImage", "somePath", nil, nil, nil, nil),
|
||||
wantResources: resources.NewDefaultCloudControllerManagerDeployment("someProvider", "someImage", "somePath", "someCIDR", nil, nil, nil, nil),
|
||||
resourcesYAML: string(nginxDeplYAML),
|
||||
wantErr: false,
|
||||
},
|
||||
|
@ -9,8 +9,6 @@ import (
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const defaultCIDR = "10.244.0.0/16"

type cloudControllerManagerDeployment struct {
	ServiceAccount k8s.ServiceAccount
	ClusterRoleBinding rbac.ClusterRoleBinding
@ -22,14 +20,12 @@ type cloudControllerManagerDeployment struct {
// https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager

// NewDefaultCloudControllerManagerDeployment creates a new *cloudControllerManagerDeployment, customized for the CSP.
func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path string, extraArgs []string, extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar) *cloudControllerManagerDeployment {
func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path, podCIDR string, extraArgs []string, extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar) *cloudControllerManagerDeployment {
	command := []string{
		path,
		fmt.Sprintf("--cloud-provider=%s", cloudProvider),
		"--leader-elect=true",
		"--allocate-node-cidrs=false",
		"--configure-cloud-routes=false",
		fmt.Sprintf("--cluster-cidr=%s", defaultCIDR),
		fmt.Sprintf("--cluster-cidr=%s", podCIDR),
		"-v=2",
	}
	command = append(command, extraArgs...)
@ -151,6 +147,10 @@ func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path strin
					Key: "node-role.kubernetes.io/master",
					Effect: k8s.TaintEffectNoSchedule,
				},
				{
					Key: "node.kubernetes.io/not-ready",
					Effect: k8s.TaintEffectNoSchedule,
				},
			},
			NodeSelector: map[string]string{
				"node-role.kubernetes.io/master": "",
@ -12,7 +12,7 @@ func TestCloudControllerMarshalUnmarshal(t *testing.T) {
	require := require.New(t)
	assert := assert.New(t)

	cloudControllerManagerDepl := NewDefaultCloudControllerManagerDeployment("dummy-cloudprovider", "some-image:latest", "/dummy_path", []string{}, []k8s.Volume{}, []k8s.VolumeMount{}, nil)
	cloudControllerManagerDepl := NewDefaultCloudControllerManagerDeployment("dummy-cloudprovider", "some-image:latest", "/dummy_path", "192.0.2.0/24", []string{}, []k8s.Volume{}, []k8s.VolumeMount{}, nil)
	data, err := cloudControllerManagerDepl.Marshal()
	require.NoError(err)

@ -1,339 +0,0 @@
|
||||
package resources
|
||||
|
||||
import (
|
||||
"google.golang.org/protobuf/proto"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
k8s "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1beta1"
|
||||
rbac "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
cniConfJSON = `{"name":"cbr0","cniVersion":"0.3.1","plugins":[{"type":"flannel","delegate":{"hairpinMode":true,"isDefaultGateway":true}},{"type":"portmap","capabilities":{"portMappings":true}}]}`
|
||||
netConfJSON = `{"Network":"10.244.0.0/16","Backend":{"Type":"vxlan"}}`
|
||||
)
|
||||
|
||||
// Reference: https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
|
||||
// Changes compared to the reference: added the wireguard interface "wg0" to the args of the "kube-flannel" container of the DaemonSet.
|
||||
|
||||
type FlannelDeployment struct {
|
||||
PodSecurityPolicy policy.PodSecurityPolicy
|
||||
ClusterRole rbac.ClusterRole
|
||||
ClusterRoleBinding rbac.ClusterRoleBinding
|
||||
ServiceAccount k8s.ServiceAccount
|
||||
ConfigMap k8s.ConfigMap
|
||||
DaemonSet apps.DaemonSet
|
||||
}
|
||||
|
||||
func NewDefaultFlannelDeployment() *FlannelDeployment {
|
||||
return &FlannelDeployment{
|
||||
PodSecurityPolicy: policy.PodSecurityPolicy{
|
||||
TypeMeta: v1.TypeMeta{
|
||||
APIVersion: "policy/v1beta1",
|
||||
Kind: "PodSecurityPolicy",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "psp.flannel.unprivileged",
|
||||
Annotations: map[string]string{
|
||||
"seccomp.security.alpha.kubernetes.io/allowedProfileNames": "docker/default",
|
||||
"seccomp.security.alpha.kubernetes.io/defaultProfileName": "docker/default",
|
||||
"apparmor.security.beta.kubernetes.io/allowedProfileNames": "runtime/default",
|
||||
"apparmor.security.beta.kubernetes.io/defaultProfileName": "runtime/default",
|
||||
},
|
||||
},
|
||||
Spec: policy.PodSecurityPolicySpec{
|
||||
Privileged: false,
|
||||
Volumes: []policy.FSType{
|
||||
policy.FSType("configMap"),
|
||||
policy.FSType("secret"),
|
||||
policy.FSType("emptyDir"),
|
||||
policy.FSType("hostPath"),
|
||||
},
|
||||
AllowedHostPaths: []policy.AllowedHostPath{
|
||||
{PathPrefix: "/etc/cni/net.d"},
|
||||
{PathPrefix: "/etc/kube-flannel"},
|
||||
{PathPrefix: "/run/flannel"},
|
||||
},
|
||||
ReadOnlyRootFilesystem: false,
|
||||
RunAsUser: policy.RunAsUserStrategyOptions{
|
||||
Rule: policy.RunAsUserStrategyRunAsAny,
|
||||
},
|
||||
SupplementalGroups: policy.SupplementalGroupsStrategyOptions{
|
||||
Rule: policy.SupplementalGroupsStrategyRunAsAny,
|
||||
},
|
||||
FSGroup: policy.FSGroupStrategyOptions{
|
||||
Rule: policy.FSGroupStrategyRunAsAny,
|
||||
},
|
||||
AllowPrivilegeEscalation: proto.Bool(false),
|
||||
DefaultAllowPrivilegeEscalation: proto.Bool(false),
|
||||
AllowedCapabilities: []k8s.Capability{
|
||||
k8s.Capability("NET_ADMIN"),
|
||||
k8s.Capability("NET_RAW"),
|
||||
},
|
||||
HostPID: false,
|
||||
HostIPC: false,
|
||||
HostNetwork: true,
|
||||
HostPorts: []policy.HostPortRange{
|
||||
{Min: 0, Max: 65535},
|
||||
},
|
||||
SELinux: policy.SELinuxStrategyOptions{
|
||||
Rule: policy.SELinuxStrategyRunAsAny,
|
||||
},
|
||||
},
|
||||
},
|
||||
ClusterRole: rbac.ClusterRole{
|
||||
TypeMeta: v1.TypeMeta{
|
||||
APIVersion: "rbac.authorization.k8s.io/v1",
|
||||
Kind: "ClusterRole",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "flannel",
|
||||
},
|
||||
Rules: []rbac.PolicyRule{
|
||||
{
|
||||
APIGroups: []string{"extensions"},
|
||||
Resources: []string{"podsecuritypolicies"},
|
||||
Verbs: []string{"use"},
|
||||
ResourceNames: []string{"psp.flannel.unprivileged"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{""},
|
||||
Resources: []string{"pods"},
|
||||
Verbs: []string{"get"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{""},
|
||||
Resources: []string{"nodes"},
|
||||
Verbs: []string{"list", "watch"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{""},
|
||||
Resources: []string{"nodes/status"},
|
||||
Verbs: []string{"patch"},
|
||||
},
|
||||
},
|
||||
},
|
||||
ClusterRoleBinding: rbac.ClusterRoleBinding{
|
||||
TypeMeta: v1.TypeMeta{
|
||||
Kind: "ClusterRoleBinding",
|
||||
APIVersion: "rbac.authorization.k8s.io/v1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "flannel",
|
||||
},
|
||||
RoleRef: rbac.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: "flannel",
|
||||
},
|
||||
Subjects: []rbac.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Name: "flannel",
|
||||
Namespace: "kube-system",
|
||||
},
|
||||
},
|
||||
},
|
||||
ServiceAccount: k8s.ServiceAccount{
|
||||
TypeMeta: v1.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "ServiceAccount",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "flannel",
|
||||
Namespace: "kube-system",
|
||||
},
|
||||
},
|
||||
ConfigMap: k8s.ConfigMap{
|
||||
TypeMeta: v1.TypeMeta{
|
||||
Kind: "ConfigMap",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "kube-flannel-cfg",
|
||||
Namespace: "kube-system",
|
||||
Labels: map[string]string{
|
||||
"tier": "node",
|
||||
"app": "flannel",
|
||||
},
|
||||
},
|
||||
Data: map[string]string{
|
||||
"cni-conf.json": cniConfJSON,
|
||||
"net-conf.json": netConfJSON,
|
||||
},
|
||||
},
|
||||
DaemonSet: apps.DaemonSet{
|
||||
TypeMeta: v1.TypeMeta{
|
||||
APIVersion: "apps/v1",
|
||||
Kind: "DaemonSet",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "kube-flannel-ds",
|
||||
Namespace: "kube-system",
|
||||
Labels: map[string]string{
|
||||
"tier": "node",
|
||||
"app": "flannel",
|
||||
},
|
||||
},
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &v1.LabelSelector{
|
||||
MatchLabels: map[string]string{"app": "flannel"},
|
||||
},
|
||||
Template: k8s.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"tier": "node",
|
||||
"app": "flannel",
|
||||
},
|
||||
},
|
||||
Spec: k8s.PodSpec{
|
||||
Affinity: &k8s.Affinity{
|
||||
NodeAffinity: &k8s.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &k8s.NodeSelector{
|
||||
NodeSelectorTerms: []k8s.NodeSelectorTerm{
|
||||
{MatchExpressions: []k8s.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "kubernetes.io/os",
|
||||
Operator: k8s.NodeSelectorOpIn,
|
||||
Values: []string{"linux"},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
HostNetwork: true,
|
||||
PriorityClassName: "system-node-critical",
|
||||
Tolerations: []k8s.Toleration{
|
||||
{
|
||||
Operator: k8s.TolerationOpExists,
|
||||
Effect: k8s.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
ServiceAccountName: "flannel",
|
||||
InitContainers: []k8s.Container{
|
||||
{
|
||||
Name: "install-cni-plugin",
|
||||
Image: "rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.0",
|
||||
Command: []string{"cp"},
|
||||
Args: []string{"-f", "/flannel", "/opt/cni/bin/flannel"},
|
||||
VolumeMounts: []k8s.VolumeMount{
|
||||
{
|
||||
Name: "cni-plugin",
|
||||
MountPath: "/opt/cni/bin",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "install-cni",
|
||||
Image: "quay.io/coreos/flannel:v0.15.1",
|
||||
Command: []string{"cp"},
|
||||
Args: []string{"-f", "/etc/kube-flannel/cni-conf.json", "/etc/cni/net.d/10-flannel.conflist"},
|
||||
VolumeMounts: []k8s.VolumeMount{
|
||||
{
|
||||
Name: "cni",
|
||||
MountPath: "/etc/cni/net.d",
|
||||
},
|
||||
{
|
||||
Name: "flannel-cfg",
|
||||
MountPath: "/etc/kube-flannel/",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []k8s.Container{
|
||||
{
|
||||
Name: "kube-flannel",
|
||||
Image: "quay.io/coreos/flannel:v0.15.1",
|
||||
Command: []string{"/opt/bin/flanneld"},
|
||||
Args: []string{"--ip-masq", "--kube-subnet-mgr", "--iface", "wg0"},
|
||||
Resources: k8s.ResourceRequirements{
|
||||
Requests: k8s.ResourceList{
|
||||
"cpu": resource.MustParse("100m"),
|
||||
"memory": resource.MustParse("50Mi"),
|
||||
},
|
||||
Limits: k8s.ResourceList{
|
||||
"cpu": resource.MustParse("100m"),
|
||||
"memory": resource.MustParse("50Mi"),
|
||||
},
|
||||
},
|
||||
SecurityContext: &k8s.SecurityContext{
|
||||
Privileged: proto.Bool(false),
|
||||
Capabilities: &k8s.Capabilities{
|
||||
Add: []k8s.Capability{k8s.Capability("NET_ADMIN"), k8s.Capability("NET_RAW")},
|
||||
},
|
||||
},
|
||||
Env: []k8s.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
ValueFrom: &k8s.EnvVarSource{
|
||||
FieldRef: &k8s.ObjectFieldSelector{FieldPath: "metadata.name"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "POD_NAMESPACE",
|
||||
ValueFrom: &k8s.EnvVarSource{
|
||||
FieldRef: &k8s.ObjectFieldSelector{FieldPath: "metadata.namespace"},
|
||||
},
|
||||
},
|
||||
},
|
||||
VolumeMounts: []k8s.VolumeMount{
|
||||
{
|
||||
Name: "run",
|
||||
MountPath: "/run/flannel",
|
||||
},
|
||||
{
|
||||
Name: "flannel-cfg",
|
||||
MountPath: "/etc/kube-flannel/",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []k8s.Volume{
|
||||
{
|
||||
Name: "run",
|
||||
VolumeSource: k8s.VolumeSource{
|
||||
HostPath: &k8s.HostPathVolumeSource{
|
||||
Path: "/run/flannel",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "cni-plugin",
|
||||
VolumeSource: k8s.VolumeSource{
|
||||
HostPath: &k8s.HostPathVolumeSource{
|
||||
Path: "/opt/cni/bin",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "cni",
|
||||
VolumeSource: k8s.VolumeSource{
|
||||
HostPath: &k8s.HostPathVolumeSource{
|
||||
Path: "/etc/cni/net.d",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "flannel-cfg",
|
||||
VolumeSource: k8s.VolumeSource{
|
||||
ConfigMap: &k8s.ConfigMapVolumeSource{
|
||||
LocalObjectReference: k8s.LocalObjectReference{
|
||||
Name: "kube-flannel-cfg",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FlannelDeployment) Marshal() ([]byte, error) {
|
||||
return MarshalK8SResources(f)
|
||||
}
|
@ -1,21 +0,0 @@
package resources

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestFlannelDeployment(t *testing.T) {
	require := require.New(t)
	assert := assert.New(t)

	flannelDeployment := NewDefaultFlannelDeployment()
	data, err := flannelDeployment.Marshal()
	require.NoError(err)

	var recreated FlannelDeployment
	require.NoError(UnmarshalK8SResources(data, &recreated))
	assert.Equal(flannelDeployment, &recreated)
}
@ -20,6 +20,11 @@ const (
	kubeletStartTimeout = 10 * time.Minute
)

var (
	kubernetesKeyRegexp = regexp.MustCompile("[a-f0-9]{64}")
	providerIDRegex     = regexp.MustCompile(`^azure:///subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/Microsoft.Compute/virtualMachineScaleSets/([^/]+)/virtualMachines/([^/]+)$`)
)

// Client provides the functionality of `kubectl apply`.
type Client interface {
	Apply(resources resources.Marshaler, forceConflicts bool) error
@ -27,21 +32,6 @@ type Client interface {
	// TODO: add tolerations
}

type ClusterUtil interface {
	InstallComponents(ctx context.Context, version string) error
	InitCluster(initConfig []byte) error
	JoinCluster(joinConfig []byte) error
	SetupPodNetwork(kubectl Client, podNetworkConfiguration resources.Marshaler) error
	SetupAutoscaling(kubectl Client, clusterAutoscalerConfiguration resources.Marshaler, secrets resources.Marshaler) error
	SetupCloudControllerManager(kubectl Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error
	SetupCloudNodeManager(kubectl Client, cloudNodeManagerConfiguration resources.Marshaler) error
	SetupKMS(kubectl Client, kmsConfiguration resources.Marshaler) error
	StartKubelet() error
	RestartKubelet() error
	GetControlPlaneJoinCertificateKey() (string, error)
	CreateJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error)
}

// KubernetesUtil provides low level management of the kubernetes cluster.
type KubernetesUtil struct {
	inst installer
@ -68,7 +58,7 @@ func (k *KubernetesUtil) InstallComponents(ctx context.Context, version string)
	return enableSystemdUnit(ctx, kubeletServiceEtcPath)
}

func (k *KubernetesUtil) InitCluster(initConfig []byte) error {
func (k *KubernetesUtil) InitCluster(ctx context.Context, initConfig []byte) error {
	// TODO: audit policy should be user input
	auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()
	if err != nil {
@ -88,7 +78,7 @@ func (k *KubernetesUtil) InitCluster(initConfig []byte) error {
		return fmt.Errorf("writing kubeadm init yaml config %v failed: %w", initConfigFile.Name(), err)
	}

	cmd := exec.Command(kubeadmPath, "init", "--config", initConfigFile.Name())
	cmd := exec.CommandContext(ctx, kubeadmPath, "init", "--config", initConfigFile.Name())
	_, err = cmd.Output()
	if err != nil {
		var exitErr *exec.ExitError
@ -100,18 +90,84 @@
	return nil
}

// SetupPodNetwork sets up the flannel pod network.
func (k *KubernetesUtil) SetupPodNetwork(kubectl Client, podNetworkConfiguration resources.Marshaler) error {
	if err := kubectl.Apply(podNetworkConfiguration, true); err != nil {
type SetupPodNetworkInput struct {
	CloudProvider     string
	NodeName          string
	FirstNodePodCIDR  string
	SubnetworkPodCIDR string
	ProviderID        string
}

// SetupPodNetwork sets up the cilium pod network.
func (k *KubernetesUtil) SetupPodNetwork(ctx context.Context, in SetupPodNetworkInput) error {
	switch in.CloudProvider {
	case "gcp":
		return k.setupGCPPodNetwork(ctx, in.NodeName, in.FirstNodePodCIDR, in.SubnetworkPodCIDR)
	case "azure":
		return k.setupAzurePodNetwork(ctx, in.ProviderID, in.SubnetworkPodCIDR)
	case "qemu":
		return k.setupQemuPodNetwork(ctx)
	default:
		return fmt.Errorf("unsupported cloud provider %q", in.CloudProvider)
	}
}
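
// Illustrative sketch, not part of this commit: how a caller might drive the
// provider switch above. The CIDRs and node name below are hypothetical; in this
// change the real input is assembled by KubeWrapper.InitCluster from provider metadata.
func exampleSetupPodNetwork(ctx context.Context, k *KubernetesUtil) error {
	in := SetupPodNetworkInput{
		CloudProvider:     "gcp",                         // selects setupGCPPodNetwork
		NodeName:          "constellation-coordinator-0", // hypothetical node name
		FirstNodePodCIDR:  "10.244.0.0/24",               // hypothetical alias IP range of the first node
		SubnetworkPodCIDR: "10.244.0.0/16",               // hypothetical subnetwork CIDR
		ProviderID:        "",                            // only required for the "azure" case
	}
	return k.SetupPodNetwork(ctx, in)
}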

func (k *KubernetesUtil) setupAzurePodNetwork(ctx context.Context, providerID, subnetworkPodCIDR string) error {
	matches := providerIDRegex.FindStringSubmatch(providerID)
	if len(matches) != 5 {
		return fmt.Errorf("error splitting providerID %q", providerID)
	}

	ciliumInstall := exec.CommandContext(ctx, "cilium", "install", "--azure-resource-group", matches[2], "--encryption", "wireguard", "--ipam", "azure",
		"--helm-set",
		"tunnel=disabled,enableIPv4Masquerade=true,azure.enabled=true,debug.enabled=true,ipv4NativeRoutingCIDR="+subnetworkPodCIDR+
			",endpointRoutes.enabled=true,encryption.enabled=true,encryption.type=wireguard,l7Proxy=false,egressMasqueradeInterfaces=eth0")
	ciliumInstall.Env = append(os.Environ(), "KUBECONFIG="+kubeConfig)
	out, err := ciliumInstall.CombinedOutput()
	if err != nil {
		err = errors.New(string(out))
		return err
	}
	return nil
}
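
// Illustrative sketch, not part of this commit: what providerIDRegex extracts from a
// well-formed Azure provider ID. The ID below is hypothetical; matches[2] is the
// resource group handed to `cilium install --azure-resource-group` above.
func exampleSplitProviderID() {
	providerID := "azure:///subscriptions/sub-id/resourceGroups/my-rg/providers/Microsoft.Compute/virtualMachineScaleSets/my-vmss/virtualMachines/0"
	matches := providerIDRegex.FindStringSubmatch(providerID)
	// len(matches) == 5:
	// matches[1] == "sub-id", matches[2] == "my-rg", matches[3] == "my-vmss", matches[4] == "0"
	_ = matches
}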

func (k *KubernetesUtil) setupGCPPodNetwork(ctx context.Context, nodeName, nodePodCIDR, subnetworkPodCIDR string) error {
	out, err := exec.CommandContext(ctx, kubectlPath, "--kubeconfig", kubeConfig, "patch", "node", nodeName, "-p", "{\"spec\":{\"podCIDR\": \""+nodePodCIDR+"\"}}").CombinedOutput()
	if err != nil {
		err = errors.New(string(out))
		return err
	}

	// allow coredns to run on uninitialized nodes (required by cloud-controller-manager)
	err := exec.Command(kubectlPath, "--kubeconfig", kubeConfig, "-n", "kube-system", "patch", "deployment", "coredns", "--type", "json", "-p", "[{\"op\":\"add\",\"path\":\"/spec/template/spec/tolerations/-\",\"value\":{\"key\":\"node.cloudprovider.kubernetes.io/uninitialized\",\"value\":\"true\",\"effect\":\"NoSchedule\"}}]").Run()
	err = exec.CommandContext(ctx, kubectlPath, "--kubeconfig", kubeConfig, "-n", "kube-system", "patch", "deployment", "coredns", "--type", "json", "-p", "[{\"op\":\"add\",\"path\":\"/spec/template/spec/tolerations/-\",\"value\":{\"key\":\"node.cloudprovider.kubernetes.io/uninitialized\",\"value\":\"true\",\"effect\":\"NoSchedule\"}},{\"op\":\"add\",\"path\":\"/spec/template/spec/nodeSelector\",\"value\":{\"node-role.kubernetes.io/control-plane\":\"\"}}]").Run()
	if err != nil {
		return err
	}
	return exec.Command(kubectlPath, "--kubeconfig", kubeConfig, "-n", "kube-system", "patch", "deployment", "coredns", "--type", "json", "-p", "[{\"op\":\"add\",\"path\":\"/spec/template/spec/tolerations/-\",\"value\":{\"key\":\"node.kubernetes.io/network-unavailable\",\"value\":\"\",\"effect\":\"NoSchedule\"}}]").Run()

	err = exec.CommandContext(ctx, kubectlPath, "--kubeconfig", kubeConfig, "-n", "kube-system", "patch", "deployment", "coredns", "--type", "json", "-p", "[{\"op\":\"add\",\"path\":\"/spec/template/spec/tolerations/-\",\"value\":{\"key\":\"node.kubernetes.io/network-unavailable\",\"value\":\"\",\"effect\":\"NoSchedule\"}}]").Run()
	if err != nil {
		return err
	}

	ciliumInstall := exec.CommandContext(ctx, "cilium", "install", "--encryption", "wireguard", "--ipam", "kubernetes", "--ipv4-native-routing-cidr", subnetworkPodCIDR, "--helm-set", "endpointRoutes.enabled=true,tunnel=disabled")
	ciliumInstall.Env = append(os.Environ(), "KUBECONFIG="+kubeConfig)
	out, err = ciliumInstall.CombinedOutput()
	if err != nil {
		err = errors.New(string(out))
		return err
	}
	return nil
}

func (k *KubernetesUtil) setupQemuPodNetwork(ctx context.Context) error {
	ciliumInstall := exec.CommandContext(ctx, "cilium", "install", "--encryption", "wireguard", "--helm-set", "ipam.operator.clusterPoolIPv4PodCIDRList=10.244.0.0/16,endpointRoutes.enabled=true")
	ciliumInstall.Env = append(os.Environ(), "KUBECONFIG="+kubeConfig)
	out, err := ciliumInstall.CombinedOutput()
	if err != nil {
		err = errors.New(string(out))
		return err
	}
	return nil
}
|
||||
|
||||
// SetupAutoscaling deploys the k8s cluster autoscaler.
|
||||
@ -142,7 +198,7 @@ func (k *KubernetesUtil) SetupCloudNodeManager(kubectl Client, cloudNodeManagerC
|
||||
}
|
||||
|
||||
// JoinCluster joins existing Kubernetes cluster using kubeadm join.
|
||||
func (k *KubernetesUtil) JoinCluster(joinConfig []byte) error {
|
||||
func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte) error {
|
||||
// TODO: audit policy should be user input
|
||||
auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()
|
||||
if err != nil {
|
||||
@ -163,7 +219,7 @@ func (k *KubernetesUtil) JoinCluster(joinConfig []byte) error {
|
||||
}
|
||||
|
||||
// run `kubeadm join` to join a worker node to an existing Kubernetes cluster
|
||||
cmd := exec.Command(kubeadmPath, "join", "--config", joinConfigFile.Name())
|
||||
cmd := exec.CommandContext(ctx, kubeadmPath, "join", "--config", joinConfigFile.Name())
|
||||
if _, err := cmd.Output(); err != nil {
|
||||
var exitErr *exec.ExitError
|
||||
if errors.As(err, &exitErr) {
|
||||
@ -201,10 +257,10 @@ func (k *KubernetesUtil) RestartKubelet() error {
|
||||
|
||||
// GetControlPlaneJoinCertificateKey return the key which can be used in combination with the joinArgs
|
||||
// to join the Cluster as control-plane.
|
||||
func (k *KubernetesUtil) GetControlPlaneJoinCertificateKey() (string, error) {
|
||||
func (k *KubernetesUtil) GetControlPlaneJoinCertificateKey(ctx context.Context) (string, error) {
|
||||
// Key will be valid for 1h (no option to reduce the duration).
|
||||
// https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-upload-certs
|
||||
output, err := exec.Command(kubeadmPath, "init", "phase", "upload-certs", "--upload-certs").Output()
|
||||
output, err := exec.CommandContext(ctx, kubeadmPath, "init", "phase", "upload-certs", "--upload-certs").Output()
|
||||
if err != nil {
|
||||
var exitErr *exec.ExitError
|
||||
if errors.As(err, &exitErr) {
|
||||
@ -218,7 +274,7 @@ func (k *KubernetesUtil) GetControlPlaneJoinCertificateKey() (string, error) {
|
||||
[upload-certs] Using certificate key:
|
||||
9555b74008f24687eb964bd90a164ecb5760a89481d9c55a77c129b7db438168
|
||||
*/
|
||||
key := regexp.MustCompile("[a-f0-9]{64}").FindString(string(output))
|
||||
key := kubernetesKeyRegexp.FindString(string(output))
|
||||
if key == "" {
|
||||
return "", fmt.Errorf("failed to parse kubeadm output: %s", string(output))
|
||||
}
|
||||
@ -226,8 +282,8 @@ func (k *KubernetesUtil) GetControlPlaneJoinCertificateKey() (string, error) {
|
||||
}
|
||||
|
||||
// CreateJoinToken creates a new bootstrap (join) token.
|
||||
func (k *KubernetesUtil) CreateJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
|
||||
output, err := exec.Command(kubeadmPath, "token", "create", "--ttl", ttl.String(), "--print-join-command").Output()
|
||||
func (k *KubernetesUtil) CreateJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
|
||||
output, err := exec.CommandContext(ctx, kubeadmPath, "token", "create", "--ttl", ttl.String(), "--print-join-command").Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("kubeadm token create failed: %w", err)
|
||||
}
|
||||
|
25
coordinator/kubernetes/k8sutil.go
Normal file
@ -0,0 +1,25 @@
package kubernetes

import (
	"context"
	"time"

	"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi"
	"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)

type clusterUtil interface {
	InstallComponents(ctx context.Context, version string) error
	InitCluster(ctx context.Context, initConfig []byte) error
	JoinCluster(ctx context.Context, joinConfig []byte) error
	SetupPodNetwork(context.Context, k8sapi.SetupPodNetworkInput) error
	SetupAutoscaling(kubectl k8sapi.Client, clusterAutoscalerConfiguration resources.Marshaler, secrets resources.Marshaler) error
	SetupCloudControllerManager(kubectl k8sapi.Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error
	SetupCloudNodeManager(kubectl k8sapi.Client, cloudNodeManagerConfiguration resources.Marshaler) error
	SetupKMS(kubectl k8sapi.Client, kmsConfiguration resources.Marshaler) error
	StartKubelet() error
	RestartKubelet() error
	GetControlPlaneJoinCertificateKey(ctx context.Context) (string, error)
	CreateJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error)
}
@ -6,6 +6,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
@ -13,12 +14,6 @@ import (
|
||||
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
|
||||
)
|
||||
|
||||
// k8s pod network cidr. This value was chosen to match the default flannel pod network.
|
||||
const (
|
||||
podNetworkCidr = "10.244.0.0/16"
|
||||
serviceCidr = "10.245.0.0/24"
|
||||
)
|
||||
|
||||
// configReader provides kubeconfig as []byte.
|
||||
type configReader interface {
|
||||
ReadKubeconfig() ([]byte, error)
|
||||
@ -30,43 +25,96 @@ type configurationProvider interface {
|
||||
JoinConfiguration(externalCloudProvider bool) k8sapi.KubeadmJoinYAML
|
||||
}
|
||||
|
||||
// KubeWrapper implements ClusterWrapper interface.
|
||||
// KubeWrapper implements Cluster interface.
|
||||
type KubeWrapper struct {
|
||||
clusterUtil k8sapi.ClusterUtil
|
||||
configProvider configurationProvider
|
||||
client k8sapi.Client
|
||||
kubeconfigReader configReader
|
||||
cloudProvider string
|
||||
clusterUtil clusterUtil
|
||||
configProvider configurationProvider
|
||||
client k8sapi.Client
|
||||
kubeconfigReader configReader
|
||||
cloudControllerManager CloudControllerManager
|
||||
cloudNodeManager CloudNodeManager
|
||||
clusterAutoscaler ClusterAutoscaler
|
||||
providerMetadata ProviderMetadata
|
||||
}
|
||||
|
||||
// New creates a new KubeWrapper with real values.
|
||||
func New(clusterUtil k8sapi.ClusterUtil, configProvider configurationProvider, client k8sapi.Client) *KubeWrapper {
|
||||
func New(cloudProvider string, clusterUtil clusterUtil, configProvider configurationProvider, client k8sapi.Client, cloudControllerManager CloudControllerManager,
|
||||
cloudNodeManager CloudNodeManager, clusterAutoscaler ClusterAutoscaler, providerMetadata ProviderMetadata,
|
||||
) *KubeWrapper {
|
||||
return &KubeWrapper{
|
||||
clusterUtil: clusterUtil,
|
||||
configProvider: configProvider,
|
||||
client: client,
|
||||
kubeconfigReader: &KubeconfigReader{fs: afero.Afero{Fs: afero.NewOsFs()}},
|
||||
cloudProvider: cloudProvider,
|
||||
clusterUtil: clusterUtil,
|
||||
configProvider: configProvider,
|
||||
client: client,
|
||||
kubeconfigReader: &KubeconfigReader{fs: afero.Afero{Fs: afero.NewOsFs()}},
|
||||
cloudControllerManager: cloudControllerManager,
|
||||
cloudNodeManager: cloudNodeManager,
|
||||
clusterAutoscaler: clusterAutoscaler,
|
||||
providerMetadata: providerMetadata,
|
||||
}
|
||||
}
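
// Illustrative sketch, not part of this commit: wiring a KubeWrapper through New.
// Every concrete type below (gcpClusterUtil, gcpConfigProvider, kubectlClient, gcpCCM,
// gcpCloudNodeManager, gcpAutoscaler, gcpMetadata) is a hypothetical placeholder for an
// implementation of the corresponding interface; the real wiring lives in the
// coordinator's provider-specific setup code.
//
//	kube := New(
//		"gcp",                  // cloudProvider, later forwarded as SetupPodNetworkInput.CloudProvider
//		&gcpClusterUtil{},      // clusterUtil
//		&gcpConfigProvider{},   // configurationProvider
//		&kubectlClient{},       // k8sapi.Client
//		&gcpCCM{},              // CloudControllerManager
//		&gcpCloudNodeManager{}, // CloudNodeManager
//		&gcpAutoscaler{},       // ClusterAutoscaler
//		&gcpMetadata{},         // ProviderMetadata
//	)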
|
||||
|
||||
// InitCluster initializes a new Kubernetes cluster and applies pod network provider.
|
||||
func (k *KubeWrapper) InitCluster(in InitClusterInput) error {
|
||||
func (k *KubeWrapper) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, masterSecret []byte) error {
|
||||
// TODO: k8s version should be user input
|
||||
if err := k.clusterUtil.InstallComponents(context.TODO(), "1.23.6"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
initConfig := k.configProvider.InitConfiguration(in.SupportsCloudControllerManager)
|
||||
initConfig.SetApiServerAdvertiseAddress(in.APIServerAdvertiseIP)
|
||||
initConfig.SetNodeIP(in.NodeIP)
|
||||
initConfig.SetNodeName(in.NodeName)
|
||||
initConfig.SetPodNetworkCIDR(podNetworkCidr)
|
||||
initConfig.SetServiceCIDR(serviceCidr)
|
||||
initConfig.SetProviderID(in.ProviderID)
|
||||
nodeName := vpnIP
|
||||
var providerID string
|
||||
var instance cloudtypes.Instance
|
||||
var publicIP string
|
||||
var nodePodCIDR string
|
||||
var subnetworkPodCIDR string
|
||||
// this is the IP in "kubeadm init --control-plane-endpoint=<IP/DNS>:<port>" hence the unfortunate name
|
||||
var controlPlaneEndpointIP string
|
||||
var nodeIP string
|
||||
var err error
|
||||
|
||||
// Step 1: retrieve cloud metadata for Kubernetes configuration
|
||||
if k.providerMetadata.Supported() {
|
||||
instance, err = k.providerMetadata.Self(context.TODO())
|
||||
if err != nil {
|
||||
return fmt.Errorf("retrieving own instance metadata failed: %w", err)
|
||||
}
|
||||
nodeName = k8sCompliantHostname(instance.Name)
|
||||
providerID = instance.ProviderID
|
||||
if len(instance.PrivateIPs) > 0 {
|
||||
nodeIP = instance.PrivateIPs[0]
|
||||
}
|
||||
if len(instance.PublicIPs) > 0 {
|
||||
publicIP = instance.PublicIPs[0]
|
||||
}
|
||||
if len(instance.AliasIPRanges) > 0 {
|
||||
nodePodCIDR = instance.AliasIPRanges[0]
|
||||
}
|
||||
subnetworkPodCIDR, err = k.providerMetadata.GetSubnetworkCIDR(context.TODO())
|
||||
if err != nil {
|
||||
return fmt.Errorf("retrieving subnetwork CIDR failed: %w", err)
|
||||
}
|
||||
controlPlaneEndpointIP = publicIP
|
||||
if k.providerMetadata.SupportsLoadBalancer() {
|
||||
controlPlaneEndpointIP, err = k.providerMetadata.GetLoadBalancerIP(context.TODO())
|
||||
if err != nil {
|
||||
return fmt.Errorf("retrieving load balancer IP failed: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: configure kubeadm init config
|
||||
initConfig := k.configProvider.InitConfiguration(k.cloudControllerManager.Supported())
|
||||
initConfig.SetNodeIP(nodeIP)
|
||||
initConfig.SetCertSANs([]string{publicIP, nodeIP})
|
||||
initConfig.SetNodeName(nodeName)
|
||||
initConfig.SetProviderID(providerID)
|
||||
initConfig.SetControlPlaneEndpoint(controlPlaneEndpointIP)
|
||||
initConfigYAML, err := initConfig.Marshal()
|
||||
if err != nil {
|
||||
return fmt.Errorf("encoding kubeadm init configuration as YAML failed: %w", err)
|
||||
}
|
||||
if err := k.clusterUtil.InitCluster(initConfigYAML); err != nil {
|
||||
if err := k.clusterUtil.InitCluster(ctx, initConfigYAML); err != nil {
|
||||
return fmt.Errorf("kubeadm init failed: %w", err)
|
||||
}
|
||||
kubeConfig, err := k.GetKubeconfig()
|
||||
@ -74,54 +122,72 @@ func (k *KubeWrapper) InitCluster(in InitClusterInput) error {
|
||||
return fmt.Errorf("reading kubeconfig after cluster initialization failed: %w", err)
|
||||
}
|
||||
k.client.SetKubeconfig(kubeConfig)
|
||||
flannel := resources.NewDefaultFlannelDeployment()
|
||||
if err = k.clusterUtil.SetupPodNetwork(k.client, flannel); err != nil {
|
||||
|
||||
// Step 3: configure & start kubernetes controllers
|
||||
|
||||
setupPodNetworkInput := k8sapi.SetupPodNetworkInput{
|
||||
CloudProvider: k.cloudProvider,
|
||||
NodeName: nodeName,
|
||||
FirstNodePodCIDR: nodePodCIDR,
|
||||
SubnetworkPodCIDR: subnetworkPodCIDR,
|
||||
ProviderID: providerID,
|
||||
}
|
||||
if err = k.clusterUtil.SetupPodNetwork(ctx, setupPodNetworkInput); err != nil {
|
||||
return fmt.Errorf("setup of pod network failed: %w", err)
|
||||
}
|
||||
|
||||
kms := resources.NewKMSDeployment(in.MasterSecret)
|
||||
kms := resources.NewKMSDeployment(masterSecret)
|
||||
if err = k.clusterUtil.SetupKMS(k.client, kms); err != nil {
|
||||
return fmt.Errorf("setup of kms failed: %w", err)
|
||||
}
|
||||
|
||||
if in.SupportsCloudControllerManager {
|
||||
cloudControllerManagerConfiguration := resources.NewDefaultCloudControllerManagerDeployment(
|
||||
in.CloudControllerManagerName, in.CloudControllerManagerImage, in.CloudControllerManagerPath, in.CloudControllerManagerExtraArgs,
|
||||
in.CloudControllerManagerVolumes, in.CloudControllerManagerVolumeMounts, in.CloudControllerManagerEnv,
|
||||
)
|
||||
if err := k.clusterUtil.SetupCloudControllerManager(k.client, cloudControllerManagerConfiguration, in.CloudControllerManagerConfigMaps, in.CloudControllerManagerSecrets); err != nil {
|
||||
return fmt.Errorf("failed to setup cloud-controller-manager: %w", err)
|
||||
}
|
||||
if err := k.setupCCM(context.TODO(), vpnIP, subnetworkPodCIDR, cloudServiceAccountURI, instance); err != nil {
|
||||
return fmt.Errorf("setting up cloud controller manager failed: %w", err)
|
||||
}
|
||||
if err := k.setupCloudNodeManager(); err != nil {
|
||||
return fmt.Errorf("setting up cloud node manager failed: %w", err)
|
||||
}
|
||||
|
||||
if in.SupportsCloudNodeManager {
|
||||
cloudNodeManagerConfiguration := resources.NewDefaultCloudNodeManagerDeployment(
|
||||
in.CloudNodeManagerImage, in.CloudNodeManagerPath, in.CloudNodeManagerExtraArgs,
|
||||
)
|
||||
if err := k.clusterUtil.SetupCloudNodeManager(k.client, cloudNodeManagerConfiguration); err != nil {
|
||||
return fmt.Errorf("failed to setup cloud-node-manager: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if in.SupportClusterAutoscaler {
|
||||
clusterAutoscalerConfiguration := resources.NewDefaultAutoscalerDeployment(in.AutoscalingVolumes, in.AutoscalingVolumeMounts, in.AutoscalingEnv)
|
||||
clusterAutoscalerConfiguration.SetAutoscalerCommand(in.AutoscalingCloudprovider, in.AutoscalingNodeGroups)
|
||||
if err := k.clusterUtil.SetupAutoscaling(k.client, clusterAutoscalerConfiguration, in.AutoscalingSecrets); err != nil {
|
||||
return fmt.Errorf("failed to setup cluster-autoscaler: %w", err)
|
||||
}
|
||||
if err := k.setupClusterAutoscaler(instance, cloudServiceAccountURI, autoscalingNodeGroups); err != nil {
|
||||
return fmt.Errorf("setting up cluster autoscaler failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// JoinCluster joins existing Kubernetes cluster.
|
||||
func (k *KubeWrapper) JoinCluster(args *kubeadm.BootstrapTokenDiscovery, nodeName, nodeInternalIP, nodeVPNIP, providerID, certKey string, ccmSupported bool, peerRole role.Role) error {
|
||||
func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, nodeVPNIP, certKey string, peerRole role.Role) error {
|
||||
// TODO: k8s version should be user input
|
||||
if err := k.clusterUtil.InstallComponents(context.TODO(), "1.23.6"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
joinConfig := k.configProvider.JoinConfiguration(ccmSupported)
|
||||
// Step 1: retrieve cloud metadata for Kubernetes configuration
|
||||
var providerID string
|
||||
nodeName := nodeVPNIP
|
||||
nodeInternalIP := nodeVPNIP
|
||||
if k.providerMetadata.Supported() {
|
||||
instance, err := k.providerMetadata.Self(context.TODO())
|
||||
if err != nil {
|
||||
return fmt.Errorf("retrieving own instance metadata failed: %w", err)
|
||||
}
|
||||
providerID = instance.ProviderID
|
||||
nodeName = instance.Name
|
||||
if len(instance.PrivateIPs) > 0 {
|
||||
nodeInternalIP = instance.PrivateIPs[0]
|
||||
}
|
||||
}
|
||||
nodeName = k8sCompliantHostname(nodeName)
|
||||
|
||||
if k.cloudControllerManager.Supported() && k.providerMetadata.Supported() {
|
||||
if err := k.prepareInstanceForCCM(context.TODO(), nodeVPNIP); err != nil {
|
||||
return fmt.Errorf("preparing node for CCM failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: configure kubeadm join config
|
||||
|
||||
joinConfig := k.configProvider.JoinConfiguration(k.cloudControllerManager.Supported())
|
||||
joinConfig.SetApiServerEndpoint(args.APIServerEndpoint)
|
||||
joinConfig.SetToken(args.Token)
|
||||
joinConfig.AppendDiscoveryTokenCaCertHash(args.CACertHashes[0])
|
||||
@ -129,13 +195,13 @@ func (k *KubeWrapper) JoinCluster(args *kubeadm.BootstrapTokenDiscovery, nodeNam
|
||||
joinConfig.SetNodeName(nodeName)
|
||||
joinConfig.SetProviderID(providerID)
|
||||
if peerRole == role.Coordinator {
|
||||
joinConfig.SetControlPlane(nodeVPNIP, certKey)
|
||||
joinConfig.SetControlPlane(nodeInternalIP, certKey)
|
||||
}
|
||||
joinConfigYAML, err := joinConfig.Marshal()
|
||||
if err != nil {
|
||||
return fmt.Errorf("encoding kubeadm join configuration as YAML failed: %w", err)
|
||||
}
|
||||
if err := k.clusterUtil.JoinCluster(joinConfigYAML); err != nil {
|
||||
if err := k.clusterUtil.JoinCluster(ctx, joinConfigYAML); err != nil {
|
||||
return fmt.Errorf("joining cluster failed: %v %w ", string(joinConfigYAML), err)
|
||||
}
|
||||
|
||||
@ -156,13 +222,89 @@ func (k *KubeWrapper) GetKubeconfig() ([]byte, error) {
|
||||
}
|
||||
|
||||
// GetKubeadmCertificateKey return the key needed to join the Cluster as Control-Plane (has to be executed on a control-plane; errors otherwise).
|
||||
func (k *KubeWrapper) GetKubeadmCertificateKey() (string, error) {
|
||||
return k.clusterUtil.GetControlPlaneJoinCertificateKey()
|
||||
func (k *KubeWrapper) GetKubeadmCertificateKey(ctx context.Context) (string, error) {
|
||||
return k.clusterUtil.GetControlPlaneJoinCertificateKey(ctx)
|
||||
}
|
||||
|
||||
// GetJoinToken returns a bootstrap (join) token.
|
||||
func (k *KubeWrapper) GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
|
||||
return k.clusterUtil.CreateJoinToken(ttl)
|
||||
func (k *KubeWrapper) GetJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
|
||||
return k.clusterUtil.CreateJoinToken(ctx, ttl)
|
||||
}
|
||||
|
||||
func (k *KubeWrapper) setupCCM(ctx context.Context, vpnIP, subnetworkPodCIDR, cloudServiceAccountURI string, instance cloudtypes.Instance) error {
|
||||
if !k.cloudControllerManager.Supported() {
|
||||
return nil
|
||||
}
|
||||
if err := k.prepareInstanceForCCM(context.TODO(), vpnIP); err != nil {
|
||||
return fmt.Errorf("preparing node for CCM failed: %w", err)
|
||||
}
|
||||
ccmConfigMaps, err := k.cloudControllerManager.ConfigMaps(instance)
|
||||
if err != nil {
|
||||
return fmt.Errorf("defining ConfigMaps for CCM failed: %w", err)
|
||||
}
|
||||
ccmSecrets, err := k.cloudControllerManager.Secrets(ctx, instance, cloudServiceAccountURI)
|
||||
if err != nil {
|
||||
return fmt.Errorf("defining Secrets for CCM failed: %w", err)
|
||||
}
|
||||
|
||||
cloudControllerManagerConfiguration := resources.NewDefaultCloudControllerManagerDeployment(
|
||||
k.cloudControllerManager.Name(), k.cloudControllerManager.Image(), k.cloudControllerManager.Path(), subnetworkPodCIDR,
|
||||
k.cloudControllerManager.ExtraArgs(), k.cloudControllerManager.Volumes(), k.cloudControllerManager.VolumeMounts(), k.cloudControllerManager.Env(),
|
||||
)
|
||||
if err := k.clusterUtil.SetupCloudControllerManager(k.client, cloudControllerManagerConfiguration, ccmConfigMaps, ccmSecrets); err != nil {
|
||||
return fmt.Errorf("failed to setup cloud-controller-manager: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *KubeWrapper) setupCloudNodeManager() error {
|
||||
if !k.cloudNodeManager.Supported() {
|
||||
return nil
|
||||
}
|
||||
cloudNodeManagerConfiguration := resources.NewDefaultCloudNodeManagerDeployment(
|
||||
k.cloudNodeManager.Image(), k.cloudNodeManager.Path(), k.cloudNodeManager.ExtraArgs(),
|
||||
)
|
||||
if err := k.clusterUtil.SetupCloudNodeManager(k.client, cloudNodeManagerConfiguration); err != nil {
|
||||
return fmt.Errorf("failed to setup cloud-node-manager: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *KubeWrapper) setupClusterAutoscaler(instance cloudtypes.Instance, cloudServiceAccountURI string, autoscalingNodeGroups []string) error {
|
||||
if !k.clusterAutoscaler.Supported() {
|
||||
return nil
|
||||
}
|
||||
caSecrets, err := k.clusterAutoscaler.Secrets(instance, cloudServiceAccountURI)
|
||||
if err != nil {
|
||||
return fmt.Errorf("defining Secrets for cluster-autoscaler failed: %w", err)
|
||||
}
|
||||
|
||||
clusterAutoscalerConfiguration := resources.NewDefaultAutoscalerDeployment(k.clusterAutoscaler.Volumes(), k.clusterAutoscaler.VolumeMounts(), k.clusterAutoscaler.Env())
|
||||
clusterAutoscalerConfiguration.SetAutoscalerCommand(k.clusterAutoscaler.Name(), autoscalingNodeGroups)
|
||||
if err := k.clusterUtil.SetupAutoscaling(k.client, clusterAutoscalerConfiguration, caSecrets); err != nil {
|
||||
return fmt.Errorf("failed to setup cluster-autoscaler: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// prepareInstanceForCCM sets the vpn IP in cloud provider metadata.
|
||||
func (k *KubeWrapper) prepareInstanceForCCM(ctx context.Context, vpnIP string) error {
|
||||
if err := k.providerMetadata.SetVPNIP(ctx, vpnIP); err != nil {
|
||||
return fmt.Errorf("setting VPN IP for cloud-controller-manager failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// k8sCompliantHostname transforms a hostname to an RFC 1123 compliant, lowercase subdomain as required by Kubernetes node names.
|
||||
// The following regex is used by k8s for validation: /^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$/ .
|
||||
// Only a simple heuristic is used for now (to lowercase, replace underscores).
|
||||
func k8sCompliantHostname(in string) string {
|
||||
hostname := strings.ToLower(in)
|
||||
hostname = strings.ReplaceAll(hostname, "_", "-")
|
||||
return hostname
|
||||
}
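
// Illustrative examples, not part of this commit: the heuristic above lowercases the
// input and replaces underscores with hyphens, so e.g.
//
//	k8sCompliantHostname("Constellation_Node_01") == "constellation-node-01"
//	k8sCompliantHostname("abcd-123") == "abcd-123"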
|
||||
|
||||
// StartKubelet starts the kubelet service.
|
||||
|
@ -3,9 +3,11 @@ package kubernetes
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"regexp"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi"
|
||||
"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
@ -13,13 +15,484 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/goleak"
|
||||
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
goleak.VerifyTestMain(m)
|
||||
}
|
||||
|
||||
func TestInitCluster(t *testing.T) {
|
||||
someErr := errors.New("failed")
|
||||
coordinatorVPNIP := "192.0.2.0"
|
||||
serviceAccountUri := "some-service-account-uri"
|
||||
masterSecret := []byte("some-master-secret")
|
||||
autoscalingNodeGroups := []string{"0,10,autoscaling_group_0"}
|
||||
|
||||
nodeName := "node-name"
|
||||
providerID := "provider-id"
|
||||
privateIP := "192.0.2.1"
|
||||
publicIP := "192.0.2.2"
|
||||
loadbalancerIP := "192.0.2.3"
|
||||
aliasIPRange := "192.0.2.0/24"
|
||||
|
||||
testCases := map[string]struct {
|
||||
clusterUtil stubClusterUtil
|
||||
kubeCTL stubKubeCTL
|
||||
providerMetadata ProviderMetadata
|
||||
CloudControllerManager CloudControllerManager
|
||||
CloudNodeManager CloudNodeManager
|
||||
ClusterAutoscaler ClusterAutoscaler
|
||||
kubeconfigReader configReader
|
||||
wantConfig k8sapi.KubeadmInitYAML
|
||||
wantErr bool
|
||||
}{
|
||||
"kubeadm init works without metadata": {
|
||||
clusterUtil: stubClusterUtil{},
|
||||
kubeconfigReader: &stubKubeconfigReader{
|
||||
Kubeconfig: []byte("someKubeconfig"),
|
||||
},
|
||||
providerMetadata: &stubProviderMetadata{SupportedResp: false},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
CloudNodeManager: &stubCloudNodeManager{SupportedResp: false},
|
||||
ClusterAutoscaler: &stubClusterAutoscaler{},
|
||||
wantConfig: k8sapi.KubeadmInitYAML{
|
||||
InitConfiguration: kubeadm.InitConfiguration{
|
||||
NodeRegistration: kubeadm.NodeRegistrationOptions{
|
||||
KubeletExtraArgs: map[string]string{
|
||||
"node-ip": "",
|
||||
"provider-id": "",
|
||||
},
|
||||
Name: coordinatorVPNIP,
|
||||
},
|
||||
},
|
||||
ClusterConfiguration: kubeadm.ClusterConfiguration{},
|
||||
},
|
||||
},
|
||||
"kubeadm init works with metadata and loadbalancer": {
|
||||
clusterUtil: stubClusterUtil{},
|
||||
kubeconfigReader: &stubKubeconfigReader{
|
||||
Kubeconfig: []byte("someKubeconfig"),
|
||||
},
|
||||
providerMetadata: &stubProviderMetadata{
|
||||
SupportedResp: true,
|
||||
SelfResp: cloudtypes.Instance{
|
||||
Name: nodeName,
|
||||
ProviderID: providerID,
|
||||
PrivateIPs: []string{privateIP},
|
||||
PublicIPs: []string{publicIP},
|
||||
AliasIPRanges: []string{aliasIPRange},
|
||||
},
|
||||
GetLoadBalancerIPResp: loadbalancerIP,
|
||||
SupportsLoadBalancerResp: true,
|
||||
},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
CloudNodeManager: &stubCloudNodeManager{SupportedResp: false},
|
||||
ClusterAutoscaler: &stubClusterAutoscaler{},
|
||||
wantConfig: k8sapi.KubeadmInitYAML{
|
||||
InitConfiguration: kubeadm.InitConfiguration{
|
||||
NodeRegistration: kubeadm.NodeRegistrationOptions{
|
||||
KubeletExtraArgs: map[string]string{
|
||||
"node-ip": privateIP,
|
||||
"provider-id": providerID,
|
||||
},
|
||||
Name: nodeName,
|
||||
},
|
||||
},
|
||||
ClusterConfiguration: kubeadm.ClusterConfiguration{
|
||||
ControlPlaneEndpoint: loadbalancerIP,
|
||||
APIServer: kubeadm.APIServer{
|
||||
CertSANs: []string{publicIP, privateIP},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
"kubeadm init fails when retrieving metadata self": {
|
||||
clusterUtil: stubClusterUtil{},
|
||||
kubeconfigReader: &stubKubeconfigReader{
|
||||
Kubeconfig: []byte("someKubeconfig"),
|
||||
},
|
||||
providerMetadata: &stubProviderMetadata{
|
||||
SelfErr: someErr,
|
||||
SupportedResp: true,
|
||||
},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
CloudNodeManager: &stubCloudNodeManager{},
|
||||
ClusterAutoscaler: &stubClusterAutoscaler{},
|
||||
wantErr: true,
|
||||
},
|
||||
"kubeadm init fails when retrieving metadata subnetwork cidr": {
|
||||
clusterUtil: stubClusterUtil{},
|
||||
kubeconfigReader: &stubKubeconfigReader{
|
||||
Kubeconfig: []byte("someKubeconfig"),
|
||||
},
|
||||
providerMetadata: &stubProviderMetadata{
|
||||
GetSubnetworkCIDRErr: someErr,
|
||||
SupportedResp: true,
|
||||
},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
CloudNodeManager: &stubCloudNodeManager{},
|
||||
ClusterAutoscaler: &stubClusterAutoscaler{},
|
||||
wantErr: true,
|
||||
},
|
||||
"kubeadm init fails when retrieving metadata loadbalancer ip": {
|
||||
clusterUtil: stubClusterUtil{},
|
||||
kubeconfigReader: &stubKubeconfigReader{
|
||||
Kubeconfig: []byte("someKubeconfig"),
|
||||
},
|
||||
providerMetadata: &stubProviderMetadata{
|
||||
GetLoadBalancerIPErr: someErr,
|
||||
SupportsLoadBalancerResp: true,
|
||||
SupportedResp: true,
|
||||
},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
CloudNodeManager: &stubCloudNodeManager{},
|
||||
ClusterAutoscaler: &stubClusterAutoscaler{},
|
||||
wantErr: true,
|
||||
},
|
||||
"kubeadm init fails when applying the init config": {
|
||||
clusterUtil: stubClusterUtil{initClusterErr: someErr},
|
||||
kubeconfigReader: &stubKubeconfigReader{
|
||||
Kubeconfig: []byte("someKubeconfig"),
|
||||
},
|
||||
providerMetadata: &stubProviderMetadata{},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
CloudNodeManager: &stubCloudNodeManager{},
|
||||
ClusterAutoscaler: &stubClusterAutoscaler{},
|
||||
wantErr: true,
|
||||
},
|
||||
"kubeadm init fails when setting up the pod network": {
|
||||
clusterUtil: stubClusterUtil{setupPodNetworkErr: someErr},
|
||||
kubeconfigReader: &stubKubeconfigReader{
|
||||
Kubeconfig: []byte("someKubeconfig"),
|
||||
},
|
||||
providerMetadata: &stubProviderMetadata{},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
CloudNodeManager: &stubCloudNodeManager{},
|
||||
ClusterAutoscaler: &stubClusterAutoscaler{},
|
||||
wantErr: true,
|
||||
},
|
||||
"kubeadm init fails when setting the cloud contoller manager": {
|
||||
clusterUtil: stubClusterUtil{setupCloudControllerManagerError: someErr},
|
||||
kubeconfigReader: &stubKubeconfigReader{
|
||||
Kubeconfig: []byte("someKubeconfig"),
|
||||
},
|
||||
providerMetadata: &stubProviderMetadata{},
|
||||
CloudControllerManager: &stubCloudControllerManager{SupportedResp: true},
|
||||
CloudNodeManager: &stubCloudNodeManager{},
|
||||
ClusterAutoscaler: &stubClusterAutoscaler{},
|
||||
wantErr: true,
|
||||
},
|
||||
"kubeadm init fails when setting the cloud node manager": {
|
||||
clusterUtil: stubClusterUtil{setupCloudNodeManagerError: someErr},
|
||||
kubeconfigReader: &stubKubeconfigReader{
|
||||
Kubeconfig: []byte("someKubeconfig"),
|
||||
},
|
||||
providerMetadata: &stubProviderMetadata{},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
CloudNodeManager: &stubCloudNodeManager{SupportedResp: true},
|
||||
ClusterAutoscaler: &stubClusterAutoscaler{},
|
||||
wantErr: true,
|
||||
},
|
||||
"kubeadm init fails when setting the cluster autoscaler": {
|
||||
clusterUtil: stubClusterUtil{setupAutoscalingError: someErr},
|
||||
kubeconfigReader: &stubKubeconfigReader{
|
||||
Kubeconfig: []byte("someKubeconfig"),
|
||||
},
|
||||
providerMetadata: &stubProviderMetadata{},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
CloudNodeManager: &stubCloudNodeManager{},
|
||||
ClusterAutoscaler: &stubClusterAutoscaler{SupportedResp: true},
|
||||
wantErr: true,
|
||||
},
|
||||
"kubeadm init fails when reading kubeconfig": {
|
||||
clusterUtil: stubClusterUtil{},
|
||||
kubeconfigReader: &stubKubeconfigReader{
|
||||
ReadErr: someErr,
|
||||
},
|
||||
providerMetadata: &stubProviderMetadata{},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
CloudNodeManager: &stubCloudNodeManager{},
|
||||
ClusterAutoscaler: &stubClusterAutoscaler{},
|
||||
wantErr: true,
|
||||
},
|
||||
"kubeadm init fails when setting up the kms": {
|
||||
clusterUtil: stubClusterUtil{setupKMSError: someErr},
|
||||
kubeconfigReader: &stubKubeconfigReader{
|
||||
Kubeconfig: []byte("someKubeconfig"),
|
||||
},
|
||||
providerMetadata: &stubProviderMetadata{SupportedResp: false},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
CloudNodeManager: &stubCloudNodeManager{SupportedResp: false},
|
||||
ClusterAutoscaler: &stubClusterAutoscaler{},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
kube := KubeWrapper{
|
||||
clusterUtil: &tc.clusterUtil,
|
||||
providerMetadata: tc.providerMetadata,
|
||||
cloudControllerManager: tc.CloudControllerManager,
|
||||
cloudNodeManager: tc.CloudNodeManager,
|
||||
clusterAutoscaler: tc.ClusterAutoscaler,
|
||||
configProvider: &stubConfigProvider{InitConfig: k8sapi.KubeadmInitYAML{}},
|
||||
client: &tc.kubeCTL,
|
||||
kubeconfigReader: tc.kubeconfigReader,
|
||||
}
|
||||
err := kube.InitCluster(context.Background(), autoscalingNodeGroups, serviceAccountUri, coordinatorVPNIP, masterSecret)
|
||||
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
|
||||
var kubeadmConfig k8sapi.KubeadmInitYAML
|
||||
require.NoError(resources.UnmarshalK8SResources(tc.clusterUtil.initConfigs[0], &kubeadmConfig))
|
||||
require.Equal(tc.wantConfig.ClusterConfiguration, kubeadmConfig.ClusterConfiguration)
|
||||
require.Equal(tc.wantConfig.InitConfiguration, kubeadmConfig.InitConfiguration)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestJoinCluster(t *testing.T) {
|
||||
someErr := errors.New("failed")
|
||||
joinCommand := &kubeadm.BootstrapTokenDiscovery{
|
||||
APIServerEndpoint: "192.0.2.0:6443",
|
||||
Token: "kube-fake-token",
|
||||
CACertHashes: []string{"sha256:a60ebe9b0879090edd83b40a4df4bebb20506bac1e51d518ff8f4505a721930f"},
|
||||
}
|
||||
|
||||
nodeVPNIP := "192.0.2.0"
|
||||
certKey := "cert-key"
|
||||
|
||||
testCases := map[string]struct {
|
||||
clusterUtil stubClusterUtil
|
||||
providerMetadata ProviderMetadata
|
||||
CloudControllerManager CloudControllerManager
|
||||
wantConfig kubeadm.JoinConfiguration
|
||||
role role.Role
|
||||
wantErr bool
|
||||
}{
|
||||
"kubeadm join worker works without metadata": {
|
||||
clusterUtil: stubClusterUtil{},
|
||||
providerMetadata: &stubProviderMetadata{},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
role: role.Node,
|
||||
wantConfig: kubeadm.JoinConfiguration{
|
||||
Discovery: kubeadm.Discovery{
|
||||
BootstrapToken: joinCommand,
|
||||
},
|
||||
NodeRegistration: kubeadm.NodeRegistrationOptions{
|
||||
Name: nodeVPNIP,
|
||||
KubeletExtraArgs: map[string]string{"node-ip": "192.0.2.0"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"kubeadm join worker works with metadata": {
|
||||
clusterUtil: stubClusterUtil{},
|
||||
providerMetadata: &stubProviderMetadata{
|
||||
SupportedResp: true,
|
||||
SelfResp: cloudtypes.Instance{
|
||||
ProviderID: "provider-id",
|
||||
Name: "metadata-name",
|
||||
PrivateIPs: []string{"192.0.2.1"},
|
||||
},
|
||||
},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
role: role.Node,
|
||||
wantConfig: kubeadm.JoinConfiguration{
|
||||
Discovery: kubeadm.Discovery{
|
||||
BootstrapToken: joinCommand,
|
||||
},
|
||||
NodeRegistration: kubeadm.NodeRegistrationOptions{
|
||||
Name: "metadata-name",
|
||||
KubeletExtraArgs: map[string]string{"node-ip": "192.0.2.1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"kubeadm join worker works with metadata and cloud controller manager": {
|
||||
clusterUtil: stubClusterUtil{},
|
||||
providerMetadata: &stubProviderMetadata{
|
||||
SupportedResp: true,
|
||||
SelfResp: cloudtypes.Instance{
|
||||
ProviderID: "provider-id",
|
||||
Name: "metadata-name",
|
||||
PrivateIPs: []string{"192.0.2.1"},
|
||||
},
|
||||
},
|
||||
CloudControllerManager: &stubCloudControllerManager{
|
||||
SupportedResp: true,
|
||||
},
|
||||
role: role.Node,
|
||||
wantConfig: kubeadm.JoinConfiguration{
|
||||
Discovery: kubeadm.Discovery{
|
||||
BootstrapToken: joinCommand,
|
||||
},
|
||||
NodeRegistration: kubeadm.NodeRegistrationOptions{
|
||||
Name: "metadata-name",
|
||||
KubeletExtraArgs: map[string]string{"node-ip": "192.0.2.1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"kubeadm join control-plane node works with metadata": {
|
||||
clusterUtil: stubClusterUtil{},
|
||||
providerMetadata: &stubProviderMetadata{
|
||||
SupportedResp: true,
|
||||
SelfResp: cloudtypes.Instance{
|
||||
ProviderID: "provider-id",
|
||||
Name: "metadata-name",
|
||||
PrivateIPs: []string{"192.0.2.1"},
|
||||
},
|
||||
},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
role: role.Coordinator,
|
||||
wantConfig: kubeadm.JoinConfiguration{
|
||||
Discovery: kubeadm.Discovery{
|
||||
BootstrapToken: joinCommand,
|
||||
},
|
||||
NodeRegistration: kubeadm.NodeRegistrationOptions{
|
||||
Name: "metadata-name",
|
||||
KubeletExtraArgs: map[string]string{"node-ip": "192.0.2.1"},
|
||||
},
|
||||
ControlPlane: &kubeadm.JoinControlPlane{
|
||||
LocalAPIEndpoint: kubeadm.APIEndpoint{
|
||||
AdvertiseAddress: "192.0.2.1",
|
||||
BindPort: 6443,
|
||||
},
|
||||
CertificateKey: certKey,
|
||||
},
|
||||
},
|
||||
},
|
||||
"kubeadm join worker fails when retrieving self metadata": {
|
||||
clusterUtil: stubClusterUtil{},
|
||||
providerMetadata: &stubProviderMetadata{
|
||||
SupportedResp: true,
|
||||
SelfErr: someErr,
|
||||
},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
role: role.Node,
|
||||
wantErr: true,
|
||||
},
|
||||
"kubeadm join worker fails when applying the join config": {
|
||||
clusterUtil: stubClusterUtil{joinClusterErr: someErr},
|
||||
providerMetadata: &stubProviderMetadata{},
|
||||
CloudControllerManager: &stubCloudControllerManager{},
|
||||
role: role.Node,
|
||||
wantErr: true,
|
||||
},
|
||||
"kubeadm join worker works fails when setting the metadata for the cloud controller manager": {
|
||||
clusterUtil: stubClusterUtil{},
|
||||
providerMetadata: &stubProviderMetadata{
|
||||
SupportedResp: true,
|
||||
SelfResp: cloudtypes.Instance{
|
||||
ProviderID: "provider-id",
|
||||
Name: "metadata-name",
|
||||
PrivateIPs: []string{"192.0.2.1"},
|
||||
},
|
||||
SetVPNIPErr: someErr,
|
||||
},
|
||||
CloudControllerManager: &stubCloudControllerManager{
|
||||
SupportedResp: true,
|
||||
},
|
||||
role: role.Node,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
kube := KubeWrapper{
|
||||
clusterUtil: &tc.clusterUtil,
|
||||
providerMetadata: tc.providerMetadata,
|
||||
cloudControllerManager: tc.CloudControllerManager,
|
||||
configProvider: &stubConfigProvider{},
|
||||
}
|
||||
|
||||
err := kube.JoinCluster(context.Background(), joinCommand, nodeVPNIP, certKey, tc.role)
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
|
||||
var joinYaml k8sapi.KubeadmJoinYAML
|
||||
joinYaml, err = joinYaml.Unmarshal(tc.clusterUtil.joinConfigs[0])
|
||||
require.NoError(err)
|
||||
|
||||
assert.Equal(tc.wantConfig, joinYaml.JoinConfiguration)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
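The cases above pin down how JoinCluster is expected to pick the node's registration data: with provider metadata available, the instance name and first private IP win; otherwise the node VPN IP is used for both name and node-ip. A minimal sketch of that selection, using a hypothetical helper name (not the implementation shipped in this commit):

// joinNodeIdentity mirrors the expectations encoded in the test cases above:
// prefer cloud metadata (instance name, first private IP), fall back to the VPN IP.
func joinNodeIdentity(metadataSupported bool, instance cloudtypes.Instance, nodeVPNIP string) (nodeName, nodeIP string) {
	nodeName, nodeIP = nodeVPNIP, nodeVPNIP
	if metadataSupported {
		nodeName = k8sCompliantHostname(instance.Name)
		if len(instance.PrivateIPs) > 0 {
			nodeIP = instance.PrivateIPs[0]
		}
	}
	return nodeName, nodeIP
}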
||||
func TestGetKubeconfig(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
Kubewrapper KubeWrapper
|
||||
wantErr bool
|
||||
}{
|
||||
"check single replacement": {
|
||||
Kubewrapper: KubeWrapper{kubeconfigReader: &stubKubeconfigReader{
|
||||
Kubeconfig: []byte("127.0.0.1:16443"),
|
||||
}},
|
||||
},
|
||||
"check multiple replacement": {
|
||||
Kubewrapper: KubeWrapper{kubeconfigReader: &stubKubeconfigReader{
|
||||
Kubeconfig: []byte("127.0.0.1:16443...127.0.0.1:16443"),
|
||||
}},
|
||||
},
|
||||
}
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
data, err := tc.Kubewrapper.GetKubeconfig()
|
||||
require.NoError(err)
|
||||
assert.NotContains(string(data), "127.0.0.1:16443")
|
||||
assert.Contains(string(data), "10.118.0.1:6443")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
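TestGetKubeconfig above only asserts that every occurrence of the local kubeadm endpoint is rewritten to the VPN-facing API server address. Functionally that amounts to a byte-level replacement along these lines (a sketch assuming the standard library bytes package, not necessarily the exact implementation):

// rewriteKubeconfigEndpoint swaps the loopback endpoint written by kubeadm for the
// VPN IP that other nodes can actually reach, matching the assertions above.
func rewriteKubeconfigEndpoint(kubeconfig []byte) []byte {
	return bytes.ReplaceAll(kubeconfig, []byte("127.0.0.1:16443"), []byte("10.118.0.1:6443"))
}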
||||
func TestK8sCompliantHostname(t *testing.T) {
|
||||
compliantHostname := regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
|
||||
testCases := map[string]struct {
|
||||
hostname string
|
||||
wantHostname string
|
||||
}{
|
||||
"azure scale set names work": {
|
||||
hostname: "constellation-scale-set-coordinators-name_0",
|
||||
wantHostname: "constellation-scale-set-coordinators-name-0",
|
||||
},
|
||||
"compliant hostname is not modified": {
|
||||
hostname: "abcd-123",
|
||||
wantHostname: "abcd-123",
|
||||
},
|
||||
"uppercase hostnames are lowercased": {
|
||||
hostname: "ABCD",
|
||||
wantHostname: "abcd",
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
hostname := k8sCompliantHostname(tc.hostname)
|
||||
|
||||
assert.Equal(tc.wantHostname, hostname)
|
||||
assert.Regexp(compliantHostname, hostname)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
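The table above fixes the expected normalization: lowercase the name and turn the underscore carried by Azure scale-set instance names into a dash, so the result matches the RFC 1123 pattern checked by the regular expression. A sketch consistent with those cases, assuming the standard library strings package (the real function may do more):

// k8sCompliantHostnameSketch lowercases the instance name and replaces underscores,
// which is enough to satisfy the three test cases above.
func k8sCompliantHostnameSketch(name string) string {
	return strings.ReplaceAll(strings.ToLower(name), "_", "-")
}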
||||
type stubClusterUtil struct {
|
||||
installComponentsErr error
|
||||
initClusterErr error
|
||||
@ -42,12 +515,12 @@ func (s *stubClusterUtil) InstallComponents(ctx context.Context, version string)
|
||||
return s.installComponentsErr
|
||||
}
|
||||
|
||||
func (s *stubClusterUtil) InitCluster(initConfig []byte) error {
|
||||
func (s *stubClusterUtil) InitCluster(ctx context.Context, initConfig []byte) error {
|
||||
s.initConfigs = append(s.initConfigs, initConfig)
|
||||
return s.initClusterErr
|
||||
}
|
||||
|
||||
func (s *stubClusterUtil) SetupPodNetwork(kubectl k8sapi.Client, podNetworkConfiguration resources.Marshaler) error {
|
||||
func (s *stubClusterUtil) SetupPodNetwork(context.Context, k8sapi.SetupPodNetworkInput) error {
|
||||
return s.setupPodNetworkErr
|
||||
}
|
||||
|
||||
@ -67,7 +540,7 @@ func (s *stubClusterUtil) SetupCloudNodeManager(kubectl k8sapi.Client, cloudNode
|
||||
return s.setupCloudNodeManagerError
|
||||
}
|
||||
|
||||
func (s *stubClusterUtil) JoinCluster(joinConfig []byte) error {
|
||||
func (s *stubClusterUtil) JoinCluster(ctx context.Context, joinConfig []byte) error {
|
||||
s.joinConfigs = append(s.joinConfigs, joinConfig)
|
||||
return s.joinClusterErr
|
||||
}
|
||||
@ -80,11 +553,11 @@ func (s *stubClusterUtil) RestartKubelet() error {
|
||||
return s.restartKubeletErr
|
||||
}
|
||||
|
||||
func (s *stubClusterUtil) GetControlPlaneJoinCertificateKey() (string, error) {
|
||||
func (s *stubClusterUtil) GetControlPlaneJoinCertificateKey(context.Context) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (s *stubClusterUtil) CreateJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
|
||||
func (s *stubClusterUtil) CreateJoinToken(ctx context.Context, ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
|
||||
return s.createJoinTokenResponse, s.createJoinTokenErr
|
||||
}
|
||||
|
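All of the stub signatures above gain a context.Context, matching the interface change in this commit. A caller can then bound the long-running kubeadm steps; for example (hypothetical caller code against the cluster-util interface these stubs implement, interface name assumed):

// joinWithTimeout bounds a join operation instead of letting kubeadm retry indefinitely.
func joinWithTimeout(util clusterUtil, joinConfig []byte) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()
	if err := util.JoinCluster(ctx, joinConfig); err != nil {
		return fmt.Errorf("joining Kubernetes cluster: %w", err)
	}
	return nil
}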
||||
@ -132,163 +605,3 @@ type stubKubeconfigReader struct {
|
||||
func (s *stubKubeconfigReader) ReadKubeconfig() ([]byte, error) {
|
||||
return s.Kubeconfig, s.ReadErr
|
||||
}
|
||||
|
||||
func TestInitCluster(t *testing.T) {
|
||||
someErr := errors.New("failed")
|
||||
coordinatorVPNIP := "192.0.2.0"
|
||||
coordinatorProviderID := "somecloudprovider://instance-id"
|
||||
instanceName := "instance-id"
|
||||
supportsClusterAutoscaler := false
|
||||
cloudprovider := "some-cloudprovider"
|
||||
cloudControllerManagerImage := "some-image:latest"
|
||||
cloudControllerManagerPath := "/some_path"
|
||||
autoscalingNodeGroups := []string{"0,10,autoscaling_group_0"}
|
||||
|
||||
testCases := map[string]struct {
|
||||
clusterUtil stubClusterUtil
|
||||
kubeCTL stubKubeCTL
|
||||
kubeconfigReader stubKubeconfigReader
|
||||
initConfig k8sapi.KubeadmInitYAML
|
||||
joinConfig k8sapi.KubeadmJoinYAML
|
||||
wantErr bool
|
||||
}{
|
||||
"kubeadm init works": {
|
||||
clusterUtil: stubClusterUtil{},
|
||||
kubeconfigReader: stubKubeconfigReader{
|
||||
Kubeconfig: []byte("someKubeconfig"),
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
"kubeadm init errors": {
|
||||
clusterUtil: stubClusterUtil{initClusterErr: someErr},
|
||||
kubeconfigReader: stubKubeconfigReader{
|
||||
Kubeconfig: []byte("someKubeconfig"),
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
"pod network setup errors": {
|
||||
clusterUtil: stubClusterUtil{setupPodNetworkErr: someErr},
|
||||
kubeconfigReader: stubKubeconfigReader{
|
||||
Kubeconfig: []byte("someKubeconfig"),
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
kube := KubeWrapper{
|
||||
clusterUtil: &tc.clusterUtil,
|
||||
configProvider: &stubConfigProvider{InitConfig: k8sapi.KubeadmInitYAML{}},
|
||||
client: &tc.kubeCTL,
|
||||
kubeconfigReader: &tc.kubeconfigReader,
|
||||
}
|
||||
err := kube.InitCluster(
|
||||
InitClusterInput{
|
||||
APIServerAdvertiseIP: coordinatorVPNIP,
|
||||
NodeName: instanceName,
|
||||
ProviderID: coordinatorProviderID,
|
||||
SupportClusterAutoscaler: supportsClusterAutoscaler,
|
||||
AutoscalingCloudprovider: cloudprovider,
|
||||
AutoscalingNodeGroups: autoscalingNodeGroups,
|
||||
CloudControllerManagerName: cloudprovider,
|
||||
CloudControllerManagerImage: cloudControllerManagerImage,
|
||||
CloudControllerManagerPath: cloudControllerManagerPath,
|
||||
},
|
||||
)
|
||||
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
|
||||
var kubeadmConfig k8sapi.KubeadmInitYAML
|
||||
require.NoError(resources.UnmarshalK8SResources(tc.clusterUtil.initConfigs[0], &kubeadmConfig))
|
||||
assert.Equal(kubeadmConfig.InitConfiguration.LocalAPIEndpoint.AdvertiseAddress, "192.0.2.0")
|
||||
assert.Equal(kubeadmConfig.ClusterConfiguration.Networking.PodSubnet, "10.244.0.0/16")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestJoinCluster(t *testing.T) {
|
||||
someErr := errors.New("failed")
|
||||
joinCommand := &kubeadm.BootstrapTokenDiscovery{
|
||||
APIServerEndpoint: "192.0.2.0:6443",
|
||||
Token: "kube-fake-token",
|
||||
CACertHashes: []string{"sha256:a60ebe9b0879090edd83b40a4df4bebb20506bac1e51d518ff8f4505a721930f"},
|
||||
}
|
||||
|
||||
nodeVPNIP := "192.0.2.0"
|
||||
coordinatorProviderID := "somecloudprovider://instance-id"
|
||||
instanceName := "instance-id"
|
||||
client := fakeK8SClient{}
|
||||
|
||||
testCases := map[string]struct {
|
||||
clusterUtil stubClusterUtil
|
||||
wantErr bool
|
||||
}{
|
||||
"kubeadm join works": {
|
||||
clusterUtil: stubClusterUtil{},
|
||||
wantErr: false,
|
||||
},
|
||||
"kubeadm join errors": {
|
||||
clusterUtil: stubClusterUtil{joinClusterErr: someErr},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
kube := New(&tc.clusterUtil, &stubConfigProvider{}, &client)
|
||||
err := kube.JoinCluster(joinCommand, instanceName, nodeVPNIP, nodeVPNIP, coordinatorProviderID, "", true, role.Node)
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
|
||||
var joinConfig kubeadm.JoinConfiguration
|
||||
require.NoError(yaml.Unmarshal(tc.clusterUtil.joinConfigs[0], &joinConfig))
|
||||
|
||||
assert.Equal("192.0.2.0:6443", joinConfig.Discovery.BootstrapToken.APIServerEndpoint)
|
||||
assert.Equal("kube-fake-token", joinConfig.Discovery.BootstrapToken.Token)
|
||||
assert.Equal([]string{"sha256:a60ebe9b0879090edd83b40a4df4bebb20506bac1e51d518ff8f4505a721930f"}, joinConfig.Discovery.BootstrapToken.CACertHashes)
|
||||
assert.Equal(map[string]string{"node-ip": "192.0.2.0"}, joinConfig.NodeRegistration.KubeletExtraArgs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetKubeconfig(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
Kubewrapper KubeWrapper
|
||||
wantErr bool
|
||||
}{
|
||||
"check single replacement": {
|
||||
Kubewrapper: KubeWrapper{kubeconfigReader: &stubKubeconfigReader{
|
||||
Kubeconfig: []byte("127.0.0.1:16443"),
|
||||
}},
|
||||
},
|
||||
"check multiple replacement": {
|
||||
Kubewrapper: KubeWrapper{kubeconfigReader: &stubKubeconfigReader{
|
||||
Kubeconfig: []byte("127.0.0.1:16443...127.0.0.1:16443"),
|
||||
}},
|
||||
},
|
||||
}
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
data, err := tc.Kubewrapper.GetKubeconfig()
|
||||
require.NoError(err)
|
||||
assert.NotContains(string(data), "127.0.0.1:16443")
|
||||
assert.Contains(string(data), "10.118.0.1:6443")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -99,7 +99,7 @@ func (a *API) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinatorRequest, s
|
||||
}
|
||||
|
||||
logToCLI("Initializing Kubernetes ...")
|
||||
kubeconfig, err := a.core.InitCluster(in.AutoscalingNodeGroups, in.CloudServiceAccountUri, in.MasterSecret)
|
||||
kubeconfig, err := a.core.InitCluster(context.TODO(), in.AutoscalingNodeGroups, in.CloudServiceAccountUri, in.MasterSecret)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "initializing Kubernetes cluster failed: %v", err)
|
||||
}
|
||||
|
@ -39,6 +39,6 @@ type Core interface {
|
||||
|
||||
CreateSSHUsers([]ssh.UserKey) error
|
||||
|
||||
InitCluster(autoscalingNodeGroups []string, cloudServiceAccountURI string, masterSecret []byte) ([]byte, error)
|
||||
JoinCluster(joinToken *kubeadm.BootstrapTokenDiscovery, certificateKey string, role role.Role) error
|
||||
InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, masterSecret []byte) ([]byte, error)
|
||||
JoinCluster(ctx context.Context, joinToken *kubeadm.BootstrapTokenDiscovery, certificateKey string, role role.Role) error
|
||||
}
|
||||
|
@ -122,12 +122,12 @@ func (c *fakeCore) UpdatePeers(peers []peer.Peer) error {
|
||||
return c.UpdatePeersErr
|
||||
}
|
||||
|
||||
func (c *fakeCore) InitCluster(autoscalingNodeGroups []string, cloudServiceAccountURI string, masterSecret []byte) ([]byte, error) {
|
||||
func (c *fakeCore) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, masterSecret []byte) ([]byte, error) {
|
||||
c.autoscalingNodeGroups = autoscalingNodeGroups
|
||||
return c.kubeconfig, nil
|
||||
}
|
||||
|
||||
func (c *fakeCore) JoinCluster(args *kubeadm.BootstrapTokenDiscovery, _ string, _ role.Role) error {
|
||||
func (c *fakeCore) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, _ string, _ role.Role) error {
|
||||
c.joinArgs = append(c.joinArgs, *args)
|
||||
return c.joinClusterErr
|
||||
}
|
||||
|
@ -74,7 +74,7 @@ func (a *API) ActivateAsAdditionalCoordinator(ctx context.Context, in *pubproto.
|
||||
return nil, status.Errorf(codes.Internal, "add peers to vpn: %v", err)
|
||||
}
|
||||
a.logger.Info("about to join the k8s cluster")
|
||||
err = a.core.JoinCluster(joinArgs, certKey, role.Coordinator)
|
||||
err = a.core.JoinCluster(context.TODO(), joinArgs, certKey, role.Coordinator)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
@ -186,7 +186,7 @@ func (a *API) JoinCluster(ctx context.Context, in *pubproto.JoinClusterRequest)
|
||||
return nil, status.Errorf(codes.Internal, "request K8s join string: %v", err)
|
||||
}
|
||||
|
||||
err = a.core.JoinCluster(&kubeadm.BootstrapTokenDiscovery{
|
||||
err = a.core.JoinCluster(context.TODO(), &kubeadm.BootstrapTokenDiscovery{
|
||||
APIServerEndpoint: resp.ApiServerEndpoint,
|
||||
Token: resp.Token,
|
||||
CACertHashes: []string{resp.DiscoveryTokenCaCertHash},
|
||||
|
@ -47,7 +47,7 @@ func (a *API) GetUpdate(ctx context.Context, in *vpnproto.GetUpdateRequest) (*vp
|
||||
|
||||
// GetK8SJoinArgs is the RPC call to get the K8s join args.
|
||||
func (a *API) GetK8SJoinArgs(ctx context.Context, in *vpnproto.GetK8SJoinArgsRequest) (*vpnproto.GetK8SJoinArgsResponse, error) {
|
||||
args, err := a.core.GetK8sJoinArgs()
|
||||
args, err := a.core.GetK8sJoinArgs(context.TODO())
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
@ -60,7 +60,7 @@ func (a *API) GetK8SJoinArgs(ctx context.Context, in *vpnproto.GetK8SJoinArgsReq
|
||||
|
||||
// GetK8SCertificateKey is the RPC call to get the K8s certificateKey necessary for control-plane join.
|
||||
func (a *API) GetK8SCertificateKey(ctx context.Context, in *vpnproto.GetK8SCertificateKeyRequest) (*vpnproto.GetK8SCertificateKeyResponse, error) {
|
||||
certKey, err := a.core.GetK8SCertificateKey()
|
||||
certKey, err := a.core.GetK8SCertificateKey(context.TODO())
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
@ -79,7 +79,7 @@ func (a *API) GetDataKey(ctx context.Context, in *vpnproto.GetDataKeyRequest) (*
|
||||
type Core interface {
|
||||
GetPeers(resourceVersion int) (int, []peer.Peer, error)
|
||||
NotifyNodeHeartbeat(net.Addr)
|
||||
GetK8sJoinArgs() (*kubeadm.BootstrapTokenDiscovery, error)
|
||||
GetK8SCertificateKey() (string, error)
|
||||
GetK8sJoinArgs(ctx context.Context) (*kubeadm.BootstrapTokenDiscovery, error)
|
||||
GetK8SCertificateKey(ctx context.Context) (string, error)
|
||||
GetDataKey(ctx context.Context, dataKeyID string, length int) ([]byte, error)
|
||||
}
|
||||
|
@ -202,11 +202,11 @@ func (c *stubCore) NotifyNodeHeartbeat(addr net.Addr) {
|
||||
c.heartbeats = append(c.heartbeats, addr)
|
||||
}
|
||||
|
||||
func (c *stubCore) GetK8sJoinArgs() (*kubeadm.BootstrapTokenDiscovery, error) {
|
||||
func (c *stubCore) GetK8sJoinArgs(context.Context) (*kubeadm.BootstrapTokenDiscovery, error) {
|
||||
return &c.joinArgs, nil
|
||||
}
|
||||
|
||||
func (c *stubCore) GetK8SCertificateKey() (string, error) {
|
||||
func (c *stubCore) GetK8SCertificateKey(context.Context) (string, error) {
|
||||
return c.kubeadmCertificateKey, c.getCertKeyErr
|
||||
}
|
||||
|
||||
|
@ -179,7 +179,19 @@ func getIPsFromConfig(stat statec.ConstellationState, config configc.Config) ([]
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return append(coordinators.PublicIPs(), nodes.PublicIPs()...), nil
|
||||
|
||||
var ips []string
|
||||
// only deploy to non-empty public IPs
|
||||
for _, ip := range append(coordinators.PublicIPs(), nodes.PublicIPs()...) {
|
||||
if ip != "" {
|
||||
ips = append(ips, ip)
|
||||
}
|
||||
}
|
||||
if len(ips) == 0 {
|
||||
return nil, fmt.Errorf("no public IPs found in statefile")
|
||||
}
|
||||
|
||||
return ips, nil
|
||||
}
|
||||
|
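The added loop above is the whole change in behavior: instances without a public IP are skipped, and an empty result becomes an error instead of a silent no-op deploy. Isolated as a standalone helper it would look roughly like this (hypothetical name, same logic as the hunk above):

// filterEmptyIPs drops empty entries and errors out when nothing deployable remains.
func filterEmptyIPs(candidates []string) ([]string, error) {
	var ips []string
	for _, ip := range candidates {
		if ip != "" {
			ips = append(ips, ip)
		}
	}
	if len(ips) == 0 {
		return nil, fmt.Errorf("no public IPs found in statefile")
	}
	return ips, nil
}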
||||
func init() {
|
||||
|
@ -5,16 +5,16 @@ import (
|
||||
"fmt"
|
||||
|
||||
azurecloud "github.com/edgelesssys/constellation/coordinator/cloudprovider/azure"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
gcpcloud "github.com/edgelesssys/constellation/coordinator/cloudprovider/gcp"
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/internal/deploy/ssh"
|
||||
)
|
||||
|
||||
type providerMetadata interface {
|
||||
// List retrieves all instances belonging to the current constellation.
|
||||
List(ctx context.Context) ([]core.Instance, error)
|
||||
List(ctx context.Context) ([]cloudtypes.Instance, error)
|
||||
// Self retrieves the current instance.
|
||||
Self(ctx context.Context) (core.Instance, error)
|
||||
Self(ctx context.Context) (cloudtypes.Instance, error)
|
||||
}
|
||||
|
||||
// Fetcher checks the metadata service to search for instances that were set up for debugging and cloud provider specific SSH keys.
|
||||
@ -66,7 +66,7 @@ func (f *Fetcher) DiscoverDebugdIPs(ctx context.Context) ([]string, error) {
|
||||
}
|
||||
var ips []string
|
||||
for _, instance := range instances {
|
||||
ips = append(ips, instance.IPs...)
|
||||
ips = append(ips, instance.PrivateIPs...)
|
||||
}
|
||||
return ips, nil
|
||||
}
|
||||
|
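For orientation, the metadata stubs and tests in this commit touch only a handful of cloudtypes.Instance fields; abridged to just those fields (the real struct has more), it looks roughly like:

// Instance, reduced to the fields exercised by the stubs and tests in this diff.
type Instance struct {
	Name       string
	ProviderID string
	Role       role.Role
	PrivateIPs []string
	SSHKeys    map[string][]string
}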
@ -5,7 +5,7 @@ import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/internal/deploy/ssh"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@ -21,15 +21,15 @@ func TestDiscoverDebugIPs(t *testing.T) {
|
||||
}{
|
||||
"disovery works": {
|
||||
meta: stubMetadata{
|
||||
listRes: []core.Instance{
|
||||
listRes: []cloudtypes.Instance{
|
||||
{
|
||||
IPs: []string{"192.0.2.0"},
|
||||
PrivateIPs: []string{"192.0.2.0"},
|
||||
},
|
||||
{
|
||||
IPs: []string{"192.0.2.1"},
|
||||
PrivateIPs: []string{"192.0.2.1"},
|
||||
},
|
||||
{
|
||||
IPs: []string{"192.0.2.2"},
|
||||
PrivateIPs: []string{"192.0.2.2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -75,7 +75,7 @@ func TestFetchSSHKeys(t *testing.T) {
|
||||
}{
|
||||
"fetch works": {
|
||||
meta: stubMetadata{
|
||||
selfRes: core.Instance{
|
||||
selfRes: cloudtypes.Instance{
|
||||
Name: "name",
|
||||
ProviderID: "provider-id",
|
||||
SSHKeys: map[string][]string{"bob": {"ssh-rsa bobskey"}},
|
||||
@ -117,24 +117,24 @@ func TestFetchSSHKeys(t *testing.T) {
|
||||
}
|
||||
|
||||
type stubMetadata struct {
|
||||
listRes []core.Instance
|
||||
listRes []cloudtypes.Instance
|
||||
listErr error
|
||||
selfRes core.Instance
|
||||
selfRes cloudtypes.Instance
|
||||
selfErr error
|
||||
getInstanceRes core.Instance
|
||||
getInstanceRes cloudtypes.Instance
|
||||
getInstanceErr error
|
||||
supportedRes bool
|
||||
}
|
||||
|
||||
func (m *stubMetadata) List(ctx context.Context) ([]core.Instance, error) {
|
||||
func (m *stubMetadata) List(ctx context.Context) ([]cloudtypes.Instance, error) {
|
||||
return m.listRes, m.listErr
|
||||
}
|
||||
|
||||
func (m *stubMetadata) Self(ctx context.Context) (core.Instance, error) {
|
||||
func (m *stubMetadata) Self(ctx context.Context) (cloudtypes.Instance, error) {
|
||||
return m.selfRes, m.selfErr
|
||||
}
|
||||
|
||||
func (m *stubMetadata) GetInstance(ctx context.Context, providerID string) (core.Instance, error) {
|
||||
func (m *stubMetadata) GetInstance(ctx context.Context, providerID string) (cloudtypes.Instance, error) {
|
||||
return m.getInstanceRes, m.getInstanceErr
|
||||
}
|
||||
|
||||
|
go.mod
@ -32,7 +32,10 @@ replace (
|
||||
k8s.io/sample-controller => k8s.io/sample-controller v0.24.0
|
||||
)
|
||||
|
||||
replace github.com/nmiculinic/wg-quick-go v0.1.3 => github.com/katexochen/wg-quick-go v0.1.3-beta.1
|
||||
replace (
|
||||
github.com/martinjungblut/go-cryptsetup => github.com/daniel-weisse/go-cryptsetup v0.0.0-20220511084044-b537356aa24b
|
||||
github.com/nmiculinic/wg-quick-go v0.1.3 => github.com/katexochen/wg-quick-go v0.1.3-beta.1
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/compute v1.5.0
|
||||
@ -105,7 +108,6 @@ require (
|
||||
k8s.io/kubernetes v1.24.0
|
||||
k8s.io/mount-utils v0.24.0
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
|
||||
sigs.k8s.io/yaml v1.3.0
|
||||
)
|
||||
|
||||
require (
|
||||
@ -223,6 +225,5 @@ require (
|
||||
sigs.k8s.io/kustomize/api v0.11.4 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.13.6 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
)
|
||||
|
||||
replace github.com/martinjungblut/go-cryptsetup => github.com/daniel-weisse/go-cryptsetup v0.0.0-20220511084044-b537356aa24b
|
||||
|
@ -11,6 +11,8 @@ import (
|
||||
"github.com/spf13/cobra/doc"
|
||||
)
|
||||
|
||||
var seeAlsoRegexp = regexp.MustCompile(`(?s)### SEE ALSO\n.+?\n\n`)
|
||||
|
||||
func main() {
|
||||
cobra.EnableCommandSorting = false
|
||||
rootCmd := cmd.NewRootCmd()
|
||||
@ -28,7 +30,7 @@ func main() {
|
||||
}
|
||||
|
||||
// Remove "see also" sections. They list parent and child commands, which is not interesting for us.
|
||||
cleanedBody := regexp.MustCompile(`(?s)### SEE ALSO\n.+?\n\n`).ReplaceAll(body.Bytes(), nil)
|
||||
cleanedBody := seeAlsoRegexp.ReplaceAll(body.Bytes(), nil)
|
||||
|
||||
fmt.Printf("Commands:\n\n%s\n%s", cmdList, cleanedBody)
|
||||
}
|
||||
|
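Hoisting the pattern into the package-level seeAlsoRegexp compiles it once instead of on every run through the generated command docs; the same pattern in isolation (hypothetical names, standard library regexp):

// Compile once at package scope, reuse for every generated document.
var sectionRegexp = regexp.MustCompile(`(?s)### SEE ALSO\n.+?\n\n`)

func stripSeeAlso(doc []byte) []byte {
	return sectionRegexp.ReplaceAll(doc, nil)
}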
@ -197,6 +197,13 @@ func Default() *Config {
|
||||
FromPort: constants.NodePortFrom,
|
||||
ToPort: constants.NodePortTo,
|
||||
},
|
||||
{
|
||||
Name: "kubernetes",
|
||||
Description: "Kubernetes",
|
||||
Protocol: "tcp",
|
||||
IPRange: "0.0.0.0/0",
|
||||
FromPort: constants.KubernetesPort,
|
||||
},
|
||||
},
|
||||
Provider: ProviderConfig{
|
||||
// TODO remove our subscriptions from the default config
|
||||
@ -204,7 +211,7 @@ func Default() *Config {
|
||||
SubscriptionID: "0d202bbb-4fa7-4af8-8125-58c269a05435",
|
||||
TenantID: "adb650a8-5da3-4b15-b4b0-3daf65ff7626",
|
||||
Location: "North Europe",
|
||||
Image: "/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/CONSTELLATION-IMAGES/providers/Microsoft.Compute/galleries/Constellation/images/constellation-coreos/versions/0.0.1654096948",
|
||||
Image: "/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/CONSTELLATION-IMAGES/providers/Microsoft.Compute/galleries/Constellation/images/constellation-coreos/versions/0.0.1654162332",
|
||||
Measurements: azurePCRs,
|
||||
UserAssignedIdentity: "/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.ManagedIdentity/userAssignedIdentities/constellation-dev-identity",
|
||||
},
|
||||
@ -212,7 +219,7 @@ func Default() *Config {
|
||||
Project: "constellation-331613",
|
||||
Region: "europe-west3",
|
||||
Zone: "europe-west3-b",
|
||||
Image: "projects/constellation-images/global/images/constellation-coreos-1654096948",
|
||||
Image: "projects/constellation-images/global/images/constellation-coreos-1654162332",
|
||||
ServiceAccountRoles: []string{
|
||||
"roles/compute.instanceAdmin.v1",
|
||||
"roles/compute.networkAdmin",
|
||||
|
@ -29,8 +29,9 @@ const (
|
||||
NVMEOverTCPPort = 8009
|
||||
// Default NodePort Range
|
||||
// https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
|
||||
NodePortFrom = 30000
|
||||
NodePortTo = 32767
|
||||
NodePortFrom = 30000
|
||||
NodePortTo = 32767
|
||||
KubernetesPort = 6443
|
||||
|
||||
//
|
||||
// Filenames.
|
||||
|
@ -8,6 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/atls"
|
||||
"github.com/edgelesssys/constellation/coordinator/cloudprovider/cloudtypes"
|
||||
"github.com/edgelesssys/constellation/coordinator/core"
|
||||
"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
|
||||
"github.com/edgelesssys/constellation/coordinator/role"
|
||||
@ -20,33 +21,33 @@ import (
|
||||
)
|
||||
|
||||
func TestRequestKeyLoop(t *testing.T) {
|
||||
defaultInstance := core.Instance{
|
||||
defaultInstance := cloudtypes.Instance{
|
||||
Name: "test-instance",
|
||||
ProviderID: "/test/provider",
|
||||
Role: role.Coordinator,
|
||||
IPs: []string{"192.0.2.1"},
|
||||
PrivateIPs: []string{"192.0.2.1"},
|
||||
}
|
||||
|
||||
testCases := map[string]struct {
|
||||
server *stubAPIServer
|
||||
wantCalls int
|
||||
listResponse []core.Instance
|
||||
listResponse []cloudtypes.Instance
|
||||
dontStartServer bool
|
||||
}{
|
||||
"success": {
|
||||
server: &stubAPIServer{requestStateDiskKeyResp: &pubproto.RequestStateDiskKeyResponse{}},
|
||||
listResponse: []core.Instance{defaultInstance},
|
||||
listResponse: []cloudtypes.Instance{defaultInstance},
|
||||
},
|
||||
"no error if server throws an error": {
|
||||
server: &stubAPIServer{
|
||||
requestStateDiskKeyResp: &pubproto.RequestStateDiskKeyResponse{},
|
||||
requestStateDiskKeyErr: errors.New("error"),
|
||||
},
|
||||
listResponse: []core.Instance{defaultInstance},
|
||||
listResponse: []cloudtypes.Instance{defaultInstance},
|
||||
},
|
||||
"no error if the server can not be reached": {
|
||||
server: &stubAPIServer{requestStateDiskKeyResp: &pubproto.RequestStateDiskKeyResponse{}},
|
||||
listResponse: []core.Instance{defaultInstance},
|
||||
listResponse: []cloudtypes.Instance{defaultInstance},
|
||||
dontStartServer: true,
|
||||
},
|
||||
"no error if no endpoint is available": {
|
||||
@ -54,13 +55,13 @@ func TestRequestKeyLoop(t *testing.T) {
|
||||
},
|
||||
"works for multiple endpoints": {
|
||||
server: &stubAPIServer{requestStateDiskKeyResp: &pubproto.RequestStateDiskKeyResponse{}},
|
||||
listResponse: []core.Instance{
|
||||
listResponse: []cloudtypes.Instance{
|
||||
defaultInstance,
|
||||
{
|
||||
Name: "test-instance-2",
|
||||
ProviderID: "/test/provider",
|
||||
Role: role.Coordinator,
|
||||
IPs: []string{"192.0.2.2"},
|
||||
PrivateIPs: []string{"192.0.2.2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -192,19 +193,15 @@ func (s *stubAPIServer) RequestStateDiskKey(ctx context.Context, in *pubproto.Re
|
||||
}
|
||||
|
||||
type stubMetadata struct {
|
||||
listResponse []core.Instance
|
||||
listResponse []cloudtypes.Instance
|
||||
}
|
||||
|
||||
func (s stubMetadata) List(ctx context.Context) ([]core.Instance, error) {
|
||||
func (s stubMetadata) List(ctx context.Context) ([]cloudtypes.Instance, error) {
|
||||
return s.listResponse, nil
|
||||
}
|
||||
|
||||
func (s stubMetadata) Self(ctx context.Context) (core.Instance, error) {
|
||||
return core.Instance{}, nil
|
||||
}
|
||||
|
||||
func (s stubMetadata) GetInstance(ctx context.Context, providerID string) (core.Instance, error) {
|
||||
return core.Instance{}, nil
|
||||
func (s stubMetadata) Self(ctx context.Context) (cloudtypes.Instance, error) {
|
||||
return cloudtypes.Instance{}, nil
|
||||
}
|
||||
|
||||
func (s stubMetadata) SignalRole(ctx context.Context, role role.Role) error {
|
||||
|