[node operator] self-initialize resources
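With this change, the node operator deploys its own initial custom resources at startup: it discovers the cluster's control-plane and worker scaling groups at the cloud provider (Azure VM scale sets or GCP managed instance groups), then creates the default autoscaling strategy, the initial node image, and one scaling group resource per discovered group, ignoring resources that already exist.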
Signed-off-by: Malte Poll <mp@edgeless.systems>
commit 51cf638361 (parent 1cee319174)
@@ -53,8 +53,9 @@ spec:
        - mountPath: /etc/azure
          name: azureconfig
          readOnly: true
        # TODO(user): Configure the resources accordingly based on the project requirements.
        # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
        - mountPath: /etc/gce
          name: gceconf
          readOnly: true
        resources:
          limits:
            cpu: 500m
@@ -71,6 +72,10 @@ spec:
        secret:
          secretName: azureconfig
          optional: true
      - name: gceconf
        configMap:
          name: gceconf
          optional: true
      nodeSelector:
        node-role.kubernetes.io/control-plane: ""
      imagePullSecrets:
@@ -30,6 +30,8 @@ type scaleSetsAPI interface {
	BeginDeleteInstances(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs armcomputev2.VirtualMachineScaleSetVMInstanceRequiredIDs,
		options *armcomputev2.VirtualMachineScaleSetsClientBeginDeleteInstancesOptions,
	) (*runtime.Poller[armcomputev2.VirtualMachineScaleSetsClientDeleteInstancesResponse], error)
	NewListPager(resourceGroupName string, options *armcomputev2.VirtualMachineScaleSetsClientListOptions,
	) *runtime.Pager[armcomputev2.VirtualMachineScaleSetsClientListResponse]
}

type capacityPoller interface {
@@ -9,6 +9,7 @@ import (

// Client is a client for the Azure Cloud.
type Client struct {
	config cloudConfig
	scaleSetsAPI
	virtualMachineScaleSetVMsAPI
	capacityPollerGenerator func(resourceGroup, scaleSet string, wantedCapacity int64) capacityPoller
@@ -37,6 +38,7 @@ func NewFromDefault(configPath string) (*Client, error) {
	}

	return &Client{
		config:                       *config,
		scaleSetsAPI:                 scaleSetAPI,
		virtualMachineScaleSetVMsAPI: virtualMachineScaleSetVMsAPI,
		capacityPollerGenerator: func(resourceGroup, scaleSet string, wantedCapacity int64) capacityPoller {
@@ -16,6 +16,7 @@ type stubScaleSetsAPI struct {
	deleteResponse armcomputev2.VirtualMachineScaleSetsClientDeleteInstancesResponse
	deleteErr      error
	resultErr      error
	pager          *stubVMSSPager
}

func (a *stubScaleSetsAPI) Get(ctx context.Context, resourceGroupName string, vmScaleSetName string,
@@ -54,12 +55,20 @@ func (a *stubScaleSetsAPI) BeginDeleteInstances(ctx context.Context, resourceGro
	return poller, a.deleteErr
}

func (a *stubScaleSetsAPI) NewListPager(resourceGroupName string, options *armcomputev2.VirtualMachineScaleSetsClientListOptions,
) *runtime.Pager[armcomputev2.VirtualMachineScaleSetsClientListResponse] {
	return runtime.NewPager(runtime.PagingHandler[armcomputev2.VirtualMachineScaleSetsClientListResponse]{
		More:    a.pager.moreFunc(),
		Fetcher: a.pager.fetcherFunc(),
	})
}

type stubvirtualMachineScaleSetVMsAPI struct {
	scaleSetVM      armcomputev2.VirtualMachineScaleSetVMsClientGetResponse
	getErr          error
	instanceView    armcomputev2.VirtualMachineScaleSetVMsClientGetInstanceViewResponse
	instanceViewErr error
-	pager           *stubPager
+	pager           *stubVMSSVMPager
}

func (a *stubvirtualMachineScaleSetVMsAPI) Get(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string,
@@ -102,19 +111,19 @@ func (p *stubPoller[T]) Result(ctx context.Context, out *T) error {
	return p.resultErr
}

-type stubPager struct {
+type stubVMSSVMPager struct {
	list     []armcomputev2.VirtualMachineScaleSetVM
	fetchErr error
	more     bool
}

-func (p *stubPager) moreFunc() func(armcomputev2.VirtualMachineScaleSetVMsClientListResponse) bool {
+func (p *stubVMSSVMPager) moreFunc() func(armcomputev2.VirtualMachineScaleSetVMsClientListResponse) bool {
	return func(armcomputev2.VirtualMachineScaleSetVMsClientListResponse) bool {
		return p.more
	}
}

-func (p *stubPager) fetcherFunc() func(context.Context, *armcomputev2.VirtualMachineScaleSetVMsClientListResponse) (armcomputev2.VirtualMachineScaleSetVMsClientListResponse, error) {
+func (p *stubVMSSVMPager) fetcherFunc() func(context.Context, *armcomputev2.VirtualMachineScaleSetVMsClientListResponse) (armcomputev2.VirtualMachineScaleSetVMsClientListResponse, error) {
	return func(context.Context, *armcomputev2.VirtualMachineScaleSetVMsClientListResponse) (armcomputev2.VirtualMachineScaleSetVMsClientListResponse, error) {
		page := make([]*armcomputev2.VirtualMachineScaleSetVM, len(p.list))
		for i := range p.list {
@@ -127,3 +136,29 @@ func (p *stubPager) fetcherFunc() func(context.Context, *armcomputev2.VirtualMac
		}, p.fetchErr
	}
}

type stubVMSSPager struct {
	list     []armcomputev2.VirtualMachineScaleSet
	fetchErr error
	more     bool
}

func (p *stubVMSSPager) moreFunc() func(armcomputev2.VirtualMachineScaleSetsClientListResponse) bool {
	return func(armcomputev2.VirtualMachineScaleSetsClientListResponse) bool {
		return p.more
	}
}

func (p *stubVMSSPager) fetcherFunc() func(context.Context, *armcomputev2.VirtualMachineScaleSetsClientListResponse) (armcomputev2.VirtualMachineScaleSetsClientListResponse, error) {
	return func(context.Context, *armcomputev2.VirtualMachineScaleSetsClientListResponse) (armcomputev2.VirtualMachineScaleSetsClientListResponse, error) {
		page := make([]*armcomputev2.VirtualMachineScaleSet, len(p.list))
		for i := range p.list {
			page[i] = &p.list[i]
		}
		return armcomputev2.VirtualMachineScaleSetsClientListResponse{
			VirtualMachineScaleSetListResult: armcomputev2.VirtualMachineScaleSetListResult{
				Value: page,
			},
		}, p.fetchErr
	}
}
@@ -13,6 +13,7 @@ import (
type cloudConfig struct {
	TenantID       string `json:"tenantId,omitempty"`
	SubscriptionID string `json:"subscriptionId,omitempty"`
	ResourceGroup  string `json:"resourceGroup,omitempty"`
}

// loadConfig loads the cloud config from the given path.
@@ -11,7 +11,7 @@ import (
)

// this state is included in most VMs but not needed
-// to determine the node state as every provisioned VM also has a power state
+// to determine the node state as every provisioned VM also has a power state.
const provisioningStateSucceeded = "ProvisioningState/succeeded"

func TestNodeStateFromStatuses(t *testing.T) {
@@ -199,7 +199,7 @@ func TestCreateNode(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

-			pager := &stubPager{
+			pager := &stubVMSSVMPager{
				list:     tc.preexistingVMs,
				fetchErr: tc.fetchErr,
			}
@@ -325,7 +325,7 @@ func TestCapacityPollingHandler(t *testing.T) {
	assert.Error(handler.Poll(context.Background()))

	// let Poll finish
-	handler.scaleSetsAPI.(*stubScaleSetsAPI).scaleSet.SKU = &armcomputev2.SKU{Capacity: to.Ptr(int64(wantCapacity))}
+	handler.scaleSetsAPI.(*stubScaleSetsAPI).scaleSet.SKU = &armcomputev2.SKU{Capacity: to.Ptr(wantCapacity)}
	assert.NoError(handler.Poll(context.Background()))
	assert.True(handler.Done())
	assert.NoError(handler.Result(context.Background(), &gotCapacity))
@@ -0,0 +1,26 @@
package client

import (
	"context"
	"fmt"
)

// getScaleSets retrieves the IDs of all scale sets of a resource group.
func (c *Client) getScaleSets(ctx context.Context) ([]string, error) {
	pager := c.scaleSetsAPI.NewListPager(c.config.ResourceGroup, nil)
	var scaleSets []string

	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return nil, fmt.Errorf("paging scale sets: %w", err)
		}
		for _, scaleSet := range page.Value {
			if scaleSet == nil || scaleSet.ID == nil {
				continue
			}
			scaleSets = append(scaleSets, *scaleSet.ID)
		}
	}
	return scaleSets, nil
}
@@ -0,0 +1,56 @@
package client

import (
	"context"
	"errors"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	armcomputev2 "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v2"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestGetScaleSets(t *testing.T) {
	testCases := map[string]struct {
		scaleSet      armcomputev2.VirtualMachineScaleSet
		fetchPageErr  error
		wantScaleSets []string
		wantErr       bool
	}{
		"fetching scale sets works": {
			scaleSet: armcomputev2.VirtualMachineScaleSet{
				ID: to.Ptr("/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name"),
			},
			wantScaleSets: []string{"/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name"},
		},
		"fetching scale sets fails": {
			fetchPageErr: errors.New("fetch page error"),
			wantErr:      true,
		},
		"scale set is invalid": {},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			client := Client{
				scaleSetsAPI: &stubScaleSetsAPI{
					pager: &stubVMSSPager{
						list:     []armcomputev2.VirtualMachineScaleSet{tc.scaleSet},
						fetchErr: tc.fetchPageErr,
					},
				},
			}
			gotScaleSets, err := client.getScaleSets(context.Background())
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			assert.ElementsMatch(tc.wantScaleSets, gotScaleSets)
		})
	}
}
@@ -3,6 +3,7 @@ package client
import (
	"context"
	"fmt"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v2"
)
@@ -52,3 +53,32 @@ func (c *Client) SetScalingGroupImage(ctx context.Context, scalingGroupID, image
	}
	return nil
}

// GetScalingGroupName retrieves the name of a scaling group.
func (c *Client) GetScalingGroupName(ctx context.Context, scalingGroupID string) (string, error) {
	_, _, scaleSet, err := splitVMSSID(scalingGroupID)
	if err != nil {
		return "", fmt.Errorf("getting scaling group name: %w", err)
	}
	return strings.ToLower(scaleSet), nil
}

// ListScalingGroups retrieves a list of scaling groups for the cluster.
func (c *Client) ListScalingGroups(ctx context.Context, uid string) (controlPlaneGroupIDs []string, workerGroupIDs []string, err error) {
	scaleSetIDs, err := c.getScaleSets(ctx)
	if err != nil {
		return nil, nil, fmt.Errorf("listing scaling groups: %w", err)
	}
	for _, scaleSetID := range scaleSetIDs {
		_, _, scaleSet, err := splitVMSSID(scaleSetID)
		if err != nil {
			return nil, nil, fmt.Errorf("getting scaling group name: %w", err)
		}
		if scaleSet == "constellation-scale-set-controlplanes-"+uid {
			controlPlaneGroupIDs = append(controlPlaneGroupIDs, scaleSetID)
		} else if strings.HasPrefix(scaleSet, "constellation-scale-set-workers-"+uid) {
			workerGroupIDs = append(workerGroupIDs, scaleSetID)
		}
	}
	return controlPlaneGroupIDs, workerGroupIDs, nil
}
@@ -123,3 +123,97 @@ func TestSetScalingGroupImage(t *testing.T) {
		})
	}
}

func TestGetScalingGroupName(t *testing.T) {
	testCases := map[string]struct {
		scalingGroupID string
		wantName       string
		wantErr        bool
	}{
		"getting name works": {
			scalingGroupID: "/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/scale-set-name",
			wantName:       "scale-set-name",
		},
		"uppercase name is lowercased": {
			scalingGroupID: "/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/SCALE-SET-NAME",
			wantName:       "scale-set-name",
		},
		"splitting scalingGroupID fails": {
			scalingGroupID: "invalid",
			wantErr:        true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			client := Client{}
			gotName, err := client.GetScalingGroupName(context.Background(), tc.scalingGroupID)
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			assert.Equal(tc.wantName, gotName)
		})
	}
}

func TestListScalingGroups(t *testing.T) {
	testCases := map[string]struct {
		scaleSet          armcomputev2.VirtualMachineScaleSet
		fetchPageErr      error
		wantControlPlanes []string
		wantWorkers       []string
		wantErr           bool
	}{
		"listing control-plane works": {
			scaleSet: armcomputev2.VirtualMachineScaleSet{
				ID: to.Ptr("/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/constellation-scale-set-controlplanes-uid"),
			},
			wantControlPlanes: []string{"/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/constellation-scale-set-controlplanes-uid"},
		},
		"listing worker works": {
			scaleSet: armcomputev2.VirtualMachineScaleSet{
				ID: to.Ptr("/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/constellation-scale-set-workers-uid"),
			},
			wantWorkers: []string{"/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/constellation-scale-set-workers-uid"},
		},
		"listing other works": {
			scaleSet: armcomputev2.VirtualMachineScaleSet{
				ID: to.Ptr("/subscriptions/subscription-id/resourceGroups/resource-group/providers/Microsoft.Compute/virtualMachineScaleSets/other"),
			},
		},
		"fetching scale sets fails": {
			fetchPageErr: errors.New("fetch page error"),
			wantErr:      true,
		},
		"scale set is invalid": {},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			client := Client{
				scaleSetsAPI: &stubScaleSetsAPI{
					pager: &stubVMSSPager{
						list:     []armcomputev2.VirtualMachineScaleSet{tc.scaleSet},
						fetchErr: tc.fetchPageErr,
					},
				},
			}
			gotControlPlanes, gotWorkers, err := client.ListScalingGroups(context.Background(), "uid")
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			assert.ElementsMatch(tc.wantControlPlanes, gotControlPlanes)
			assert.ElementsMatch(tc.wantWorkers, gotWorkers)
		})
	}
}
@@ -9,7 +9,7 @@ import (
var vmssIDRegexp = regexp.MustCompile(`^/subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/Microsoft.Compute/virtualMachineScaleSets/([^/]+)$`)

// joinVMSSID joins scale set parameters to generate a virtual machine scale set (VMSS) ID.
-// Format: /subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Compute/virtualMachineScaleSets/<scale-set>
+// Format: /subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Compute/virtualMachineScaleSets/<scale-set> .
func joinVMSSID(subscriptionID, resourceGroup, scaleSet string) string {
	return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s", subscriptionID, resourceGroup, scaleSet)
}
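Note: splitVMSSID is called by GetScalingGroupName and ListScalingGroups above but is not part of this diff. A minimal sketch consistent with vmssIDRegexp and with the splitInstanceGroupID counterpart further down (the exact error message is an assumption) would be:

// splitVMSSID splits a VMSS ID into subscription ID, resource group, and scale set name.
func splitVMSSID(vmssID string) (subscriptionID, resourceGroup, scaleSet string, err error) {
	matches := vmssIDRegexp.FindStringSubmatch(vmssID)
	if len(matches) != 4 {
		return "", "", "", fmt.Errorf("splitting vmssID: invalid format: %q", vmssID)
	}
	return matches[1], matches[2], matches[3], nil
}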
@@ -0,0 +1,8 @@
package constants

const (
	AutoscalingStrategyResourceName      = "autoscalingstrategy"
	NodeImageResourceName                = "constellation-coreos"
	ControlPlaneScalingGroupResourceName = "scalinggroup-controlplane"
	WorkerScalingGroupResourceName       = "scalinggroup-worker"
)
operators/constellation-node-operator/internal/deploy/deploy.go (new file, 122 lines)
@@ -0,0 +1,122 @@
// Package deploy provides functions to deploy initial resources for the node operator.
package deploy

import (
	"context"
	"errors"
	"fmt"

	updatev1alpha1 "github.com/edgelesssys/constellation/operators/constellation-node-operator/api/v1alpha1"
	"github.com/edgelesssys/constellation/operators/constellation-node-operator/internal/constants"
	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// InitialResources creates the initial resources for the node operator.
func InitialResources(ctx context.Context, k8sClient client.Writer, scalingGroupGetter scalingGroupGetter, uid string) error {
	controlPlaneGroupIDs, workerGroupIDs, err := scalingGroupGetter.ListScalingGroups(ctx, uid)
	if err != nil {
		return fmt.Errorf("listing scaling groups: %w", err)
	}
	if len(controlPlaneGroupIDs) == 0 {
		return errors.New("determining initial node image: no control plane scaling group found")
	}
	if len(workerGroupIDs) == 0 {
		return errors.New("determining initial node image: no worker scaling group found")
	}

	if err := createAutoscalingStrategy(ctx, k8sClient); err != nil {
		return fmt.Errorf("creating initial autoscaling strategy: %w", err)
	}
	imageReference, err := scalingGroupGetter.GetScalingGroupImage(ctx, controlPlaneGroupIDs[0])
	if err != nil {
		return fmt.Errorf("determining initial node image: %w", err)
	}
	if err := createNodeImage(ctx, k8sClient, imageReference); err != nil {
		return fmt.Errorf("creating initial node image %q: %w", imageReference, err)
	}
	for _, groupID := range controlPlaneGroupIDs {
		groupName, err := scalingGroupGetter.GetScalingGroupName(ctx, groupID)
		if err != nil {
			return fmt.Errorf("determining scaling group name of %q: %w", groupID, err)
		}
		if err := createScalingGroup(ctx, k8sClient, groupID, groupName, false); err != nil {
			return fmt.Errorf("creating initial control plane scaling group: %w", err)
		}
	}
	for _, groupID := range workerGroupIDs {
		groupName, err := scalingGroupGetter.GetScalingGroupName(ctx, groupID)
		if err != nil {
			return fmt.Errorf("determining scaling group name of %q: %w", groupID, err)
		}
		if err := createScalingGroup(ctx, k8sClient, groupID, groupName, true); err != nil {
			return fmt.Errorf("creating initial worker scaling group: %w", err)
		}
	}
	return nil
}

// createAutoscalingStrategy creates the autoscaling strategy resource if it does not exist yet.
func createAutoscalingStrategy(ctx context.Context, k8sClient client.Writer) error {
	err := k8sClient.Create(ctx, &updatev1alpha1.AutoscalingStrategy{
		TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "AutoscalingStrategy"},
		ObjectMeta: metav1.ObjectMeta{
			Name: constants.AutoscalingStrategyResourceName,
		},
		Spec: updatev1alpha1.AutoscalingStrategySpec{
			Enabled:             true,
			DeploymentName:      "constellation-cluster-autoscaler",
			DeploymentNamespace: "kube-system",
		},
	})
	if k8sErrors.IsAlreadyExists(err) {
		return nil
	}
	return err
}

// createNodeImage creates the initial nodeimage resource if it does not exist yet.
func createNodeImage(ctx context.Context, k8sClient client.Writer, imageReference string) error {
	err := k8sClient.Create(ctx, &updatev1alpha1.NodeImage{
		TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "NodeImage"},
		ObjectMeta: metav1.ObjectMeta{
			Name: constants.NodeImageResourceName,
		},
		Spec: updatev1alpha1.NodeImageSpec{
			ImageReference: imageReference,
		},
	})
	if k8sErrors.IsAlreadyExists(err) {
		return nil
	}
	return err
}

// createScalingGroup creates an initial scaling group resource if it does not exist yet.
func createScalingGroup(ctx context.Context, k8sClient client.Writer, groupID, groupName string, autoscaling bool) error {
	err := k8sClient.Create(ctx, &updatev1alpha1.ScalingGroup{
		TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "ScalingGroup"},
		ObjectMeta: metav1.ObjectMeta{
			Name: groupName,
		},
		Spec: updatev1alpha1.ScalingGroupSpec{
			NodeImage:   constants.NodeImageResourceName,
			GroupID:     groupID,
			Autoscaling: autoscaling,
		},
	})
	if k8sErrors.IsAlreadyExists(err) {
		return nil
	}
	return err
}

type scalingGroupGetter interface {
	// GetScalingGroupImage retrieves the image currently used by a scaling group.
	GetScalingGroupImage(ctx context.Context, scalingGroupID string) (string, error)
	// GetScalingGroupName retrieves the name of a scaling group.
	GetScalingGroupName(ctx context.Context, scalingGroupID string) (string, error)
	// ListScalingGroups retrieves a list of scaling groups for the cluster.
	ListScalingGroups(ctx context.Context, uid string) (controlPlaneGroupIDs []string, workerGroupIDs []string, err error)
}
@@ -0,0 +1,317 @@
package deploy

import (
	"context"
	"errors"
	"testing"

	updatev1alpha1 "github.com/edgelesssys/constellation/operators/constellation-node-operator/api/v1alpha1"
	"github.com/edgelesssys/constellation/operators/constellation-node-operator/internal/constants"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func TestInitialResources(t *testing.T) {
	testCases := map[string]struct {
		items         []scalingGroupStoreItem
		imageErr      error
		nameErr       error
		listErr       error
		createErr     error
		wantResources int
		wantErr       bool
	}{
		"creating initial resources works": {
			items: []scalingGroupStoreItem{
				{groupID: "control-plane", image: "image-1", name: "control-plane", isControlPlane: true},
				{groupID: "worker", image: "image-1", name: "worker"},
			},
			wantResources: 4,
		},
		"missing control planes": {
			items: []scalingGroupStoreItem{
				{groupID: "worker", image: "image-1", name: "worker"},
			},
			wantErr: true,
		},
		"missing workers": {
			items: []scalingGroupStoreItem{
				{groupID: "control-plane", image: "image-1", name: "control-plane", isControlPlane: true},
			},
			wantErr: true,
		},
		"listing groups fails": {
			listErr: errors.New("list failed"),
			wantErr: true,
		},
		"creating resources fails": {
			items: []scalingGroupStoreItem{
				{groupID: "control-plane", image: "image-1", name: "control-plane", isControlPlane: true},
				{groupID: "worker", image: "image-1", name: "worker"},
			},
			createErr: errors.New("create failed"),
			wantErr:   true,
		},
		"getting image fails": {
			items: []scalingGroupStoreItem{
				{groupID: "control-plane", image: "image-1", name: "control-plane", isControlPlane: true},
				{groupID: "worker", image: "image-1", name: "worker"},
			},
			imageErr: errors.New("getting image failed"),
			wantErr:  true,
		},
		"getting name fails": {
			items: []scalingGroupStoreItem{
				{groupID: "control-plane", image: "image-1", name: "control-plane", isControlPlane: true},
				{groupID: "worker", image: "image-1", name: "worker"},
			},
			nameErr: errors.New("getting name failed"),
			wantErr: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			k8sClient := &stubK8sClient{createErr: tc.createErr}
			scalingGroupGetter := newScalingGroupGetter(tc.items, tc.imageErr, tc.nameErr, tc.listErr)
			err := InitialResources(context.Background(), k8sClient, scalingGroupGetter, "uid")
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			assert.Len(k8sClient.createdObjects, tc.wantResources)
		})
	}
}

func TestCreateAutoscalingStrategy(t *testing.T) {
	testCases := map[string]struct {
		createErr    error
		wantStrategy *updatev1alpha1.AutoscalingStrategy
		wantErr      bool
	}{
		"create works": {
			wantStrategy: &updatev1alpha1.AutoscalingStrategy{
				TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "AutoscalingStrategy"},
				ObjectMeta: metav1.ObjectMeta{
					Name: constants.AutoscalingStrategyResourceName,
				},
				Spec: updatev1alpha1.AutoscalingStrategySpec{
					Enabled:             true,
					DeploymentName:      "constellation-cluster-autoscaler",
					DeploymentNamespace: "kube-system",
				},
			},
		},
		"create fails": {
			createErr: errors.New("create failed"),
			wantErr:   true,
		},
		"strategy exists": {
			createErr: k8sErrors.NewAlreadyExists(schema.GroupResource{}, constants.AutoscalingStrategyResourceName),
			wantStrategy: &updatev1alpha1.AutoscalingStrategy{
				TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "AutoscalingStrategy"},
				ObjectMeta: metav1.ObjectMeta{
					Name: constants.AutoscalingStrategyResourceName,
				},
				Spec: updatev1alpha1.AutoscalingStrategySpec{
					Enabled:             true,
					DeploymentName:      "constellation-cluster-autoscaler",
					DeploymentNamespace: "kube-system",
				},
			},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			k8sClient := &stubK8sClient{createErr: tc.createErr}
			err := createAutoscalingStrategy(context.Background(), k8sClient)
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			assert.Len(k8sClient.createdObjects, 1)
			assert.Equal(tc.wantStrategy, k8sClient.createdObjects[0])
		})
	}
}

func TestCreateNodeImage(t *testing.T) {
	testCases := map[string]struct {
		createErr     error
		wantNodeImage *updatev1alpha1.NodeImage
		wantErr       bool
	}{
		"create works": {
			wantNodeImage: &updatev1alpha1.NodeImage{
				TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "NodeImage"},
				ObjectMeta: metav1.ObjectMeta{
					Name: constants.NodeImageResourceName,
				},
				Spec: updatev1alpha1.NodeImageSpec{
					ImageReference: "image-reference",
				},
			},
		},
		"create fails": {
			createErr: errors.New("create failed"),
			wantErr:   true,
		},
		"image exists": {
			createErr: k8sErrors.NewAlreadyExists(schema.GroupResource{}, constants.AutoscalingStrategyResourceName),
			wantNodeImage: &updatev1alpha1.NodeImage{
				TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "NodeImage"},
				ObjectMeta: metav1.ObjectMeta{
					Name: constants.NodeImageResourceName,
				},
				Spec: updatev1alpha1.NodeImageSpec{
					ImageReference: "image-reference",
				},
			},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			k8sClient := &stubK8sClient{createErr: tc.createErr}
			err := createNodeImage(context.Background(), k8sClient, "image-reference")
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			assert.Len(k8sClient.createdObjects, 1)
			assert.Equal(tc.wantNodeImage, k8sClient.createdObjects[0])
		})
	}
}

func TestCreateScalingGroup(t *testing.T) {
	testCases := map[string]struct {
		createErr        error
		wantScalingGroup *updatev1alpha1.ScalingGroup
		wantErr          bool
	}{
		"create works": {
			wantScalingGroup: &updatev1alpha1.ScalingGroup{
				TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "ScalingGroup"},
				ObjectMeta: metav1.ObjectMeta{
					Name: "group-name",
				},
				Spec: updatev1alpha1.ScalingGroupSpec{
					NodeImage:   constants.NodeImageResourceName,
					GroupID:     "group-id",
					Autoscaling: true,
				},
			},
		},
		"create fails": {
			createErr: errors.New("create failed"),
			wantErr:   true,
		},
		"image exists": {
			createErr: k8sErrors.NewAlreadyExists(schema.GroupResource{}, constants.AutoscalingStrategyResourceName),
			wantScalingGroup: &updatev1alpha1.ScalingGroup{
				TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "ScalingGroup"},
				ObjectMeta: metav1.ObjectMeta{
					Name: "group-name",
				},
				Spec: updatev1alpha1.ScalingGroupSpec{
					NodeImage:   constants.NodeImageResourceName,
					GroupID:     "group-id",
					Autoscaling: true,
				},
			},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			k8sClient := &stubK8sClient{createErr: tc.createErr}
			err := createScalingGroup(context.Background(), k8sClient, "group-id", "group-name", true)
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			assert.Len(k8sClient.createdObjects, 1)
			assert.Equal(tc.wantScalingGroup, k8sClient.createdObjects[0])
		})
	}
}

type stubK8sClient struct {
	createdObjects []client.Object
	createErr      error
	client.Writer
}

func (s *stubK8sClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
	s.createdObjects = append(s.createdObjects, obj)
	return s.createErr
}

type stubScalingGroupGetter struct {
	store    map[string]scalingGroupStoreItem
	imageErr error
	nameErr  error
	listErr  error
}

func newScalingGroupGetter(items []scalingGroupStoreItem, imageErr, nameErr, listErr error) *stubScalingGroupGetter {
	store := make(map[string]scalingGroupStoreItem)
	for _, item := range items {
		store[item.groupID] = item
	}
	return &stubScalingGroupGetter{
		store:    store,
		imageErr: imageErr,
		nameErr:  nameErr,
		listErr:  listErr,
	}
}

func (g *stubScalingGroupGetter) GetScalingGroupImage(ctx context.Context, scalingGroupID string) (string, error) {
	return g.store[scalingGroupID].image, g.imageErr
}

func (g *stubScalingGroupGetter) GetScalingGroupName(ctx context.Context, scalingGroupID string) (string, error) {
	return g.store[scalingGroupID].name, g.nameErr
}

func (g *stubScalingGroupGetter) ListScalingGroups(ctx context.Context, uid string) (controlPlaneGroupIDs []string, workerGroupIDs []string, err error) {
	for _, item := range g.store {
		if item.isControlPlane {
			controlPlaneGroupIDs = append(controlPlaneGroupIDs, item.groupID)
		} else {
			workerGroupIDs = append(workerGroupIDs, item.groupID)
		}
	}
	return controlPlaneGroupIDs, workerGroupIDs, g.listErr
}

type scalingGroupStoreItem struct {
	groupID        string
	name           string
	image          string
	isControlPlane bool
}
@@ -3,6 +3,7 @@ package client
import (
	"context"

	compute "cloud.google.com/go/compute/apiv1"
	"github.com/googleapis/gax-go/v2"
	computepb "google.golang.org/genproto/googleapis/cloud/compute/v1"
)
@@ -27,6 +28,8 @@ type instanceGroupManagersAPI interface {
	Close() error
	Get(ctx context.Context, req *computepb.GetInstanceGroupManagerRequest,
		opts ...gax.CallOption) (*computepb.InstanceGroupManager, error)
	AggregatedList(ctx context.Context, req *computepb.AggregatedListInstanceGroupManagersRequest,
		opts ...gax.CallOption) InstanceGroupManagerScopedListIterator
	SetInstanceTemplate(ctx context.Context, req *computepb.SetInstanceTemplateInstanceGroupManagerRequest,
		opts ...gax.CallOption) (Operation, error)
	CreateInstances(ctx context.Context, req *computepb.CreateInstancesInstanceGroupManagerRequest,
@@ -47,6 +50,10 @@ type Operation interface {
	Wait(ctx context.Context, opts ...gax.CallOption) error
}

type InstanceGroupManagerScopedListIterator interface {
	Next() (compute.InstanceGroupManagersScopedListPair, error)
}

type prng interface {
	// Intn returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n). It panics if n <= 0.
	Intn(n int) int
@@ -6,11 +6,13 @@ import (
	"time"

	compute "cloud.google.com/go/compute/apiv1"
	"github.com/spf13/afero"
	"go.uber.org/multierr"
)

// Client is a client for the Google Compute Engine.
type Client struct {
	projectID string
	instanceAPI
	instanceTemplateAPI
	instanceGroupManagersAPI
@@ -20,7 +22,12 @@ type Client struct {
}

// New creates a new client for the Google Compute Engine.
-func New(ctx context.Context) (*Client, error) {
+func New(ctx context.Context, configPath string) (*Client, error) {
+	projectID, err := loadProjectID(afero.NewOsFs(), configPath)
+	if err != nil {
+		return nil, err
+	}

	var closers []closer
	insAPI, err := compute.NewInstancesRESTClient(ctx)
	if err != nil {
@@ -44,8 +51,8 @@ func New(ctx context.Context) (*Client, error) {
		_ = closeAll(closers)
		return nil, err
	}

	return &Client{
		projectID:                projectID,
		instanceAPI:              insAPI,
		instanceTemplateAPI:      &instanceTemplateClient{templAPI},
		instanceGroupManagersAPI: &instanceGroupManagersClient{groupAPI},
@@ -3,7 +3,9 @@ package client
import (
	"context"

	compute "cloud.google.com/go/compute/apiv1"
	"github.com/googleapis/gax-go/v2"
	"google.golang.org/api/iterator"
	computepb "google.golang.org/genproto/googleapis/cloud/compute/v1"
	"google.golang.org/protobuf/proto"
)
@@ -63,6 +65,7 @@ func (a stubInstanceTemplateAPI) Insert(ctx context.Context, req *computepb.Inse
type stubInstanceGroupManagersAPI struct {
	instanceGroupManager   *computepb.InstanceGroupManager
	getErr                 error
	aggregatedListErr      error
	setInstanceTemplateErr error
	createInstancesErr     error
	deleteInstancesErr     error
@@ -78,6 +81,24 @@ func (a stubInstanceGroupManagersAPI) Get(ctx context.Context, req *computepb.Ge
	return a.instanceGroupManager, a.getErr
}

func (a stubInstanceGroupManagersAPI) AggregatedList(ctx context.Context, req *computepb.AggregatedListInstanceGroupManagersRequest,
	opts ...gax.CallOption,
) InstanceGroupManagerScopedListIterator {
	return &stubInstanceGroupManagerScopedListIterator{
		pairs: []compute.InstanceGroupManagersScopedListPair{
			{
				Key: "key",
				Value: &computepb.InstanceGroupManagersScopedList{
					InstanceGroupManagers: []*computepb.InstanceGroupManager{
						a.instanceGroupManager,
					},
				},
			},
		},
		nextErr: a.aggregatedListErr,
	}
}

func (a stubInstanceGroupManagersAPI) SetInstanceTemplate(ctx context.Context, req *computepb.SetInstanceTemplateInstanceGroupManagerRequest,
	opts ...gax.CallOption,
) (Operation, error) {
@@ -141,3 +162,22 @@ func (o *stubOperation) Done() bool {
func (o *stubOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
	return nil
}

type stubInstanceGroupManagerScopedListIterator struct {
	pairs   []compute.InstanceGroupManagersScopedListPair
	nextErr error

	internalCounter int
}

func (i *stubInstanceGroupManagerScopedListIterator) Next() (compute.InstanceGroupManagersScopedListPair, error) {
	if i.nextErr != nil {
		return compute.InstanceGroupManagersScopedListPair{}, i.nextErr
	}
	if i.internalCounter >= len(i.pairs) {
		return compute.InstanceGroupManagersScopedListPair{}, iterator.Done
	}
	pair := i.pairs[i.internalCounter]
	i.internalCounter++
	return pair, nil
}
@@ -0,0 +1,24 @@
package client

import (
	"errors"
	"regexp"

	"github.com/spf13/afero"
)

var projectIDRegex = regexp.MustCompile(`(?m)^project-id = (.*)$`)

// loadProjectID loads the project id from the gce config file.
func loadProjectID(fs afero.Fs, path string) (string, error) {
	rawConfig, err := afero.ReadFile(fs, path)
	if err != nil {
		return "", err
	}
	// find project-id line
	matches := projectIDRegex.FindStringSubmatch(string(rawConfig))
	if len(matches) != 2 {
		return "", errors.New("invalid config: project-id not found")
	}
	return matches[1], nil
}
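For reference, a gce.conf fragment that this parser accepts could look as follows; the [global] section header is illustrative only (the regex merely requires a project-id line at the start of a line, as the tests below confirm):

[global]
project-id = my-gcp-project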
@@ -0,0 +1,53 @@
package client

import (
	"testing"

	"github.com/spf13/afero"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestLoadProjectID(t *testing.T) {
	testCases := map[string]struct {
		rawConfig     string
		skipWrite     bool
		wantProjectID string
		wantErr       bool
	}{
		"valid config": {
			rawConfig:     `project-id = project-id`,
			wantProjectID: "project-id",
		},
		"invalid config": {
			rawConfig: `x = y`,
			wantErr:   true,
		},
		"config is empty": {
			wantErr: true,
		},
		"config does not exist": {
			skipWrite: true,
			wantErr:   true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			fs := afero.NewMemMapFs()
			if !tc.skipWrite {
				require.NoError(afero.WriteFile(fs, "gce.conf", []byte(tc.rawConfig), 0o644))
			}
			gotProjectID, err := loadProjectID(fs, "gce.conf")
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			assert.Equal(tc.wantProjectID, gotProjectID)
		})
	}
}
@@ -42,6 +42,12 @@ func (c *instanceGroupManagersClient) Get(ctx context.Context, req *computepb.Ge
	return c.InstanceGroupManagersClient.Get(ctx, req, opts...)
}

func (c *instanceGroupManagersClient) AggregatedList(ctx context.Context, req *computepb.AggregatedListInstanceGroupManagersRequest,
	opts ...gax.CallOption,
) InstanceGroupManagerScopedListIterator {
	return c.InstanceGroupManagersClient.AggregatedList(ctx, req, opts...)
}

func (c *instanceGroupManagersClient) SetInstanceTemplate(ctx context.Context, req *computepb.SetInstanceTemplateInstanceGroupManagerRequest,
	opts ...gax.CallOption,
) (Operation, error) {
@@ -5,7 +5,11 @@ import (
	"regexp"
)

-var instanceGroupIDRegex = regexp.MustCompile(`^projects/([^/]+)/zones/([^/]+)/instanceGroupManagers/([^/]+)$`)
+var (
+	instanceGroupIDRegex               = regexp.MustCompile(`^projects/([^/]+)/zones/([^/]+)/instanceGroupManagers/([^/]+)$`)
+	controlPlaneInstanceGroupNameRegex = regexp.MustCompile(`^(.*)control-plane(.*)$`)
+	workerInstanceGroupNameRegex       = regexp.MustCompile(`^(.*)worker(.*)$`)
+)

// splitInstanceGroupID splits an instance group ID into core components.
func splitInstanceGroupID(instanceGroupID string) (project, zone, instanceGroup string, err error) {
@@ -16,8 +20,18 @@ func splitInstanceGroupID(instanceGroupID string) (project, zone, instanceGroup
	return matches[1], matches[2], matches[3], nil
}

// isControlPlaneInstanceGroup returns true if the instance group is a control plane instance group.
func isControlPlaneInstanceGroup(instanceGroupName string) bool {
	return controlPlaneInstanceGroupNameRegex.MatchString(instanceGroupName)
}

// isWorkerInstanceGroup returns true if the instance group is a worker instance group.
func isWorkerInstanceGroup(instanceGroupName string) bool {
	return workerInstanceGroupNameRegex.MatchString(instanceGroupName)
}

// generateInstanceName generates a random instance name.
-func generateInstanceName(baseInstanceName string, random prng) (string, error) {
+func generateInstanceName(baseInstanceName string, random prng) string {
	letters := []byte("abcdefghijklmnopqrstuvwxyz0123456789")
	const uidLen = 4
	uid := make([]byte, 0, uidLen)
@@ -25,5 +39,5 @@ func generateInstanceName(baseInstanceName string, random prng) (string, error)
		n := random.Intn(len(letters))
		uid = append(uid, letters[n])
	}
-	return baseInstanceName + "-" + string(uid), nil
+	return baseInstanceName + "-" + string(uid)
}
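Taken together, the two matchers classify instance groups purely by substring of their name. A hypothetical test in the same package (reusing the fixture names from the tests below) illustrates the behavior:

func TestInstanceGroupNameMatching(t *testing.T) {
	assert := assert.New(t)
	// names containing "control-plane" or "worker" are classified accordingly
	assert.True(isControlPlaneInstanceGroup("test-control-plane-uid"))
	assert.True(isWorkerInstanceGroup("test-worker-uid"))
	// anything else is ignored by ListScalingGroups
	assert.False(isControlPlaneInstanceGroup("test-unrelated-uid"))
	assert.False(isWorkerInstanceGroup("test-unrelated-uid"))
}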
@@ -49,21 +49,17 @@ func TestSplitInstanceGroupID(t *testing.T) {

func TestGenerateInstanceName(t *testing.T) {
	assert := assert.New(t)
-	require := require.New(t)
	baseInstanceName := "base"
-	gotInstanceName, err := generateInstanceName(baseInstanceName, &stubRng{result: 0})
-	require.NoError(err)
+	gotInstanceName := generateInstanceName(baseInstanceName, &stubRng{result: 0})
	assert.Equal("base-aaaa", gotInstanceName)
}

func TestGenerateInstanceNameRandomTest(t *testing.T) {
	assert := assert.New(t)
-	require := require.New(t)
	instanceNameRegexp := regexp.MustCompile(`^base-[0-9a-z]{4}$`)
	baseInstanceName := "base"
	random := rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
-	gotInstanceName, err := generateInstanceName(baseInstanceName, random)
-	require.NoError(err)
+	gotInstanceName := generateInstanceName(baseInstanceName, random)
	assert.Regexp(instanceNameRegexp, gotInstanceName)
}

@@ -81,10 +81,7 @@ func (c *Client) CreateNode(ctx context.Context, scalingGroupID string) (nodeNam
	if instanceGroupManager.BaseInstanceName == nil {
		return "", "", fmt.Errorf("instance group manager %q has no base instance name", instanceGroupName)
	}
-	instanceName, err := generateInstanceName(*instanceGroupManager.BaseInstanceName, c.prng)
-	if err != nil {
-		return "", "", err
-	}
+	instanceName := generateInstanceName(*instanceGroupManager.BaseInstanceName, c.prng)
	op, err := c.instanceGroupManagersAPI.CreateInstances(ctx, &computepb.CreateInstancesInstanceGroupManagerRequest{
		InstanceGroupManager: instanceGroupName,
		Project:              project,
@@ -4,8 +4,11 @@ import (
	"context"
	"errors"
	"fmt"
	"strings"

	"google.golang.org/api/iterator"
	computepb "google.golang.org/genproto/googleapis/cloud/compute/v1"
	"google.golang.org/protobuf/proto"
)

// GetScalingGroupImage returns the image URI of the scaling group.
@@ -77,6 +80,47 @@ func (c *Client) SetScalingGroupImage(ctx context.Context, scalingGroupID, image
	return nil
}

// GetScalingGroupName retrieves the name of a scaling group.
func (c *Client) GetScalingGroupName(ctx context.Context, scalingGroupID string) (string, error) {
	_, _, instanceGroupName, err := splitInstanceGroupID(scalingGroupID)
	if err != nil {
		return "", fmt.Errorf("getting scaling group name: %w", err)
	}
	return strings.ToLower(instanceGroupName), nil
}

// ListScalingGroups retrieves a list of scaling groups for the cluster.
func (c *Client) ListScalingGroups(ctx context.Context, uid string) (controlPlaneGroupIDs []string, workerGroupIDs []string, err error) {
	iter := c.instanceGroupManagersAPI.AggregatedList(ctx, &computepb.AggregatedListInstanceGroupManagersRequest{
		Filter:  proto.String(fmt.Sprintf("name eq \".+-.+-%s\"", uid)), // filter by constellation UID
		Project: c.projectID,
	})
	for instanceGroupManagerScopedListPair, err := iter.Next(); ; instanceGroupManagerScopedListPair, err = iter.Next() {
		if err == iterator.Done {
			break
		}
		if err != nil {
			return nil, nil, fmt.Errorf("listing instance group managers: %w", err)
		}
		if instanceGroupManagerScopedListPair.Value == nil {
			continue
		}
		for _, instanceGroupManager := range instanceGroupManagerScopedListPair.Value.InstanceGroupManagers {
			if instanceGroupManager == nil || instanceGroupManager.Name == nil || instanceGroupManager.SelfLink == nil {
				continue
			}
			groupID := uriNormalize(*instanceGroupManager.SelfLink)

			if isControlPlaneInstanceGroup(*instanceGroupManager.Name) {
				controlPlaneGroupIDs = append(controlPlaneGroupIDs, groupID)
			} else if isWorkerInstanceGroup(*instanceGroupManager.Name) {
				workerGroupIDs = append(workerGroupIDs, groupID)
			}
		}
	}
	return controlPlaneGroupIDs, workerGroupIDs, nil
}

func (c *Client) getScalingGroupTemplate(ctx context.Context, scalingGroupID string) (*computepb.InstanceTemplate, error) {
	project, zone, instanceGroupName, err := splitInstanceGroupID(scalingGroupID)
	if err != nil {
@@ -282,3 +282,96 @@ func TestSetScalingGroupImage(t *testing.T) {
		})
	}
}

func TestGetScalingGroupName(t *testing.T) {
	testCases := map[string]struct {
		scalingGroupID string
		wantName       string
		wantErr        bool
	}{
		"valid scaling group ID": {
			scalingGroupID: "projects/project/zones/zone/instanceGroupManagers/instance-group",
			wantName:       "instance-group",
		},
		"invalid scaling group ID": {
			scalingGroupID: "invalid",
			wantErr:        true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			client := Client{}
			gotName, err := client.GetScalingGroupName(context.Background(), tc.scalingGroupID)
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			assert.Equal(tc.wantName, gotName)
		})
	}
}

func TestListScalingGroups(t *testing.T) {
	testCases := map[string]struct {
		name                         *string
		groupID                      *string
		listInstanceGroupManagersErr error
		wantControlPlanes            []string
		wantWorkers                  []string
		wantErr                      bool
	}{
		"list instance group managers fails": {
			listInstanceGroupManagersErr: errors.New("list instance group managers error"),
			wantErr:                      true,
		},
		"list instance group managers for control plane": {
			name:    proto.String("test-control-plane-uid"),
			groupID: proto.String("projects/project/zones/zone/instanceGroupManagers/test-control-plane-uid"),
			wantControlPlanes: []string{
				"projects/project/zones/zone/instanceGroupManagers/test-control-plane-uid",
			},
		},
		"list instance group managers for worker": {
			name:    proto.String("test-worker-uid"),
			groupID: proto.String("projects/project/zones/zone/instanceGroupManagers/test-worker-uid"),
			wantWorkers: []string{
				"projects/project/zones/zone/instanceGroupManagers/test-worker-uid",
			},
		},
		"unrelated instance group manager": {
			name:    proto.String("test-unrelated-uid"),
			groupID: proto.String("projects/project/zones/zone/instanceGroupManagers/test-unrelated-uid"),
		},
		"invalid instance group manager": {},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			client := Client{
				instanceGroupManagersAPI: &stubInstanceGroupManagersAPI{
					aggregatedListErr: tc.listInstanceGroupManagersErr,
					instanceGroupManager: &computepb.InstanceGroupManager{
						Name:     tc.name,
						SelfLink: tc.groupID,
					},
				},
			}
			gotControlPlanes, gotWorkers, err := client.ListScalingGroups(context.Background(), "uid")
			if tc.wantErr {
				assert.Error(err)
				return
			}
			require.NoError(err)
			assert.ElementsMatch(tc.wantControlPlanes, gotControlPlanes)
			assert.ElementsMatch(tc.wantWorkers, gotWorkers)
		})
	}
}
@@ -20,6 +20,7 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	azureclient "github.com/edgelesssys/constellation/operators/constellation-node-operator/internal/azure/client"
	"github.com/edgelesssys/constellation/operators/constellation-node-operator/internal/deploy"
	gcpclient "github.com/edgelesssys/constellation/operators/constellation-node-operator/internal/gcp/client"

	updatev1alpha1 "github.com/edgelesssys/constellation/operators/constellation-node-operator/api/v1alpha1"
@@ -36,8 +37,11 @@ var (

const (
	defaultAzureCloudConfigPath = "/etc/azure/azure.json"
	defaultGCPCloudConfigPath   = "/etc/gce/gce.conf"
	// constellationCSP is the environment variable stating which Cloud Service Provider Constellation is running on.
	constellationCSP = "CONSTEL_CSP"
	// constellationUID is the environment variable stating which uid is used to tag / label cloud provider resources belonging to one constellation.
	constellationUID = "constellation-uid"
)

func init() {
@@ -80,7 +84,10 @@ func main() {
			os.Exit(1)
		}
	case "gcp":
-		cspClient, clientErr = gcpclient.New(context.Background())
+		if cloudConfigPath == "" {
+			cloudConfigPath = defaultGCPCloudConfigPath
+		}
+		cspClient, clientErr = gcpclient.New(context.Background(), cloudConfigPath)
		if clientErr != nil {
			setupLog.Error(clientErr, "unable to create GCP client")
			os.Exit(1)
@@ -115,6 +122,11 @@ func main() {
	}
	defer etcdClient.Close()

	if err := deploy.InitialResources(context.Background(), k8sClient, cspClient, os.Getenv(constellationUID)); err != nil {
		setupLog.Error(err, "Unable to deploy initial resources")
		os.Exit(1)
	}

	if err = controllers.NewNodeImageReconciler(
		cspClient, etcdClient, mgr.GetClient(), mgr.GetScheme(),
	).SetupWithManager(mgr); err != nil {
@@ -173,4 +185,8 @@ type cspAPI interface {
	GetScalingGroupImage(ctx context.Context, scalingGroupID string) (string, error)
	// SetScalingGroupImage sets the image to be used by newly created nodes in a scaling group.
	SetScalingGroupImage(ctx context.Context, scalingGroupID, imageURI string) error
	// GetScalingGroupName retrieves the name of a scaling group.
	GetScalingGroupName(ctx context.Context, scalingGroupID string) (string, error)
	// ListScalingGroups retrieves a list of scaling groups for the cluster.
	ListScalingGroups(ctx context.Context, uid string) (controlPlaneGroupIDs []string, workerGroupIDs []string, err error)
}