Use config structs to limit the number of function arguments

commit 048ab94123 (parent 928fdcff76)
Author: Otto Bittner
Date:   2022-11-21 18:01:23 +01:00

4 changed files with 43 additions and 26 deletions
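The refactoring pattern is the same in all four files: a function that previously took a long positional parameter list now takes a single config struct, so the signature stays stable as parameters are added and call sites show what they pass in one place. The following is a minimal, self-contained sketch of the idea; the `renderConfig`/`render` names are illustrative and do not appear in this repository.

```go
package main

import "fmt"

// Before: a long positional parameter list.
//   func render(width, height int, title string, dryRun bool) error
//
// After: the former parameters are grouped into one config struct.
type renderConfig struct {
    width, height int
    title         string
    dryRun        bool
}

// render takes a single config struct instead of many arguments.
func render(cfg renderConfig) error {
    if cfg.width <= 0 || cfg.height <= 0 {
        return fmt.Errorf("invalid size %dx%d", cfg.width, cfg.height)
    }
    fmt.Printf("rendering %q at %dx%d (dryRun=%v)\n", cfg.title, cfg.width, cfg.height, cfg.dryRun)
    return nil
}

func main() {
    // The call site builds the config once and passes it along,
    // mirroring how createNewNodes and createScalingGroup are called in the diffs below.
    cfg := renderConfig{width: 800, height: 600, title: "demo", dryRun: true}
    if err := render(cfg); err != nil {
        fmt.Println("render failed:", err)
    }
}
```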

@@ -192,7 +192,8 @@ func (r *NodeImageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
         return ctrl.Result{Requeue: shouldRequeue}, nil
     }
-    if err := r.createNewNodes(ctx, desiredNodeImage, groups.Outdated, pendingNodeList.Items, scalingGroupByID, newNodesBudget); err != nil {
+    newNodeConfig := newNodeConfig{desiredNodeImage, groups.Outdated, pendingNodeList.Items, scalingGroupByID, newNodesBudget}
+    if err := r.createNewNodes(ctx, newNodeConfig); err != nil {
         return ctrl.Result{Requeue: shouldRequeue}, nil
     }

     // cleanup obsolete nodes
@@ -508,17 +509,13 @@ func (r *NodeImageReconciler) deleteNode(ctx context.Context, controller metav1.
 }

 // createNewNodes creates new nodes using up to date images as replacement for outdated nodes.
-func (r *NodeImageReconciler) createNewNodes(
-    ctx context.Context, desiredNodeImage updatev1alpha1.NodeImage,
-    outdatedNodes []corev1.Node, pendingNodes []updatev1alpha1.PendingNode,
-    scalingGroupByID map[string]updatev1alpha1.ScalingGroup, newNodesBudget int,
-) error {
+func (r *NodeImageReconciler) createNewNodes(ctx context.Context, config newNodeConfig) error {
     logr := log.FromContext(ctx)
-    if newNodesBudget < 1 || len(outdatedNodes) == 0 {
+    if config.newNodesBudget < 1 || len(config.outdatedNodes) == 0 {
         return nil
     }
     outdatedNodesPerScalingGroup := make(map[string]int)
-    for _, node := range outdatedNodes {
+    for _, node := range config.outdatedNodes {
         // skip outdated nodes that got assigned an heir in this Reconcile call
         if len(node.Annotations[heirAnnotation]) != 0 {
             continue
@@ -526,7 +523,7 @@ func (r *NodeImageReconciler) createNewNodes(
         outdatedNodesPerScalingGroup[strings.ToLower(node.Annotations[scalingGroupAnnotation])]++
     }
     pendingJoiningNodesPerScalingGroup := make(map[string]int)
-    for _, pendingNode := range pendingNodes {
+    for _, pendingNode := range config.pendingNodes {
         // skip pending nodes that are not joining
         if pendingNode.Spec.Goal != updatev1alpha1.NodeGoalJoin {
             continue
@@ -541,20 +538,20 @@ func (r *NodeImageReconciler) createNewNodes(
         }
     }
     for scalingGroupID := range requiredNodesPerScalingGroup {
-        scalingGroup, ok := scalingGroupByID[scalingGroupID]
+        scalingGroup, ok := config.scalingGroupByID[scalingGroupID]
         if !ok {
-            logr.Info("Scaling group does not have matching resource", "scalingGroup", scalingGroupID, "scalingGroups", scalingGroupByID)
+            logr.Info("Scaling group does not have matching resource", "scalingGroup", scalingGroupID, "scalingGroups", config.scalingGroupByID)
             continue
         }
-        if !strings.EqualFold(scalingGroup.Status.ImageReference, desiredNodeImage.Spec.ImageReference) {
-            logr.Info("Scaling group does not use latest image", "scalingGroup", scalingGroupID, "usedImage", scalingGroup.Status.ImageReference, "wantedImage", desiredNodeImage.Spec.ImageReference)
+        if !strings.EqualFold(scalingGroup.Status.ImageReference, config.desiredNodeImage.Spec.ImageReference) {
+            logr.Info("Scaling group does not use latest image", "scalingGroup", scalingGroupID, "usedImage", scalingGroup.Status.ImageReference, "wantedImage", config.desiredNodeImage.Spec.ImageReference)
             continue
         }
         if requiredNodesPerScalingGroup[scalingGroupID] == 0 {
             continue
         }
         for {
-            if newNodesBudget == 0 {
+            if config.newNodesBudget == 0 {
                 return nil
             }
             if requiredNodesPerScalingGroup[scalingGroupID] == 0 {
@@ -576,7 +573,7 @@ func (r *NodeImageReconciler) createNewNodes(
                     Deadline: &deadline,
                 },
             }
-            if err := ctrl.SetControllerReference(&desiredNodeImage, pendingNode, r.Scheme); err != nil {
+            if err := ctrl.SetControllerReference(&config.desiredNodeImage, pendingNode, r.Scheme); err != nil {
                 return err
             }
             if err := r.Create(ctx, pendingNode); err != nil {
@@ -584,7 +581,7 @@ func (r *NodeImageReconciler) createNewNodes(
             }
             logr.Info("Created new node", "createdNode", nodeName, "scalingGroup", scalingGroupID)
             requiredNodesPerScalingGroup[scalingGroupID]--
-            newNodesBudget--
+            config.newNodesBudget--
         }
     }
     return nil
@@ -817,3 +814,11 @@ type etcdRemover interface {
     // RemoveEtcdMemberFromCluster removes an etcd member from the cluster.
     RemoveEtcdMemberFromCluster(ctx context.Context, vpcIP string) error
 }
+
+type newNodeConfig struct {
+    desiredNodeImage updatev1alpha1.NodeImage
+    outdatedNodes []corev1.Node
+    pendingNodes []updatev1alpha1.PendingNode
+    scalingGroupByID map[string]updatev1alpha1.ScalingGroup
+    newNodesBudget int
+}
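One note on the new call sites: the config structs in this commit are filled with positional literals (for example `newNodeConfig{desiredNodeImage, groups.Outdated, pendingNodeList.Items, scalingGroupByID, newNodesBudget}`), which keeps the diff small but ties every call site to the field order. The sketch below, using a hypothetical `backupConfig` struct rather than the operator's real types, shows the named-field alternative that stays valid if fields are later added or reordered.

```go
package main

import "fmt"

// backupConfig is a hypothetical config struct used only for illustration.
type backupConfig struct {
    bucket    string
    prefix    string
    retention int
    dryRun    bool
}

func runBackup(cfg backupConfig) {
    fmt.Printf("backing up to %s/%s (retention=%d, dryRun=%v)\n",
        cfg.bucket, cfg.prefix, cfg.retention, cfg.dryRun)
}

func main() {
    // Positional literal: compact, but every value must appear in field order,
    // and the line breaks if a field is inserted or reordered.
    runBackup(backupConfig{"backups", "nightly", 7, false})

    // Named fields: order-independent and self-documenting; omitted fields
    // take their zero value, so adding a field later does not break this call.
    runBackup(backupConfig{
        bucket:    "backups",
        prefix:    "nightly",
        retention: 7,
        dryRun:    true,
    })
}
```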

@@ -590,7 +590,8 @@ func TestCreateNewNodes(t *testing.T) {
                 },
                 Scheme: getScheme(t),
             }
-            err := reconciler.createNewNodes(context.Background(), desiredNodeImage, tc.outdatedNodes, tc.pendingNodes, tc.scalingGroupByID, tc.budget)
+            newNodeConfig := newNodeConfig{desiredNodeImage, tc.outdatedNodes, tc.pendingNodes, tc.scalingGroupByID, tc.budget}
+            err := reconciler.createNewNodes(context.Background(), newNodeConfig)
             require.NoError(err)
             assert.Equal(tc.wantCreateCalls, reconciler.nodeReplacer.(*stubNodeReplacerWriter).createCalls)
         })

@@ -52,7 +52,8 @@ func InitialResources(ctx context.Context, k8sClient client.Writer, scalingGroup
         if err != nil {
             return fmt.Errorf("determining autoscaling group name of %q: %w", groupID, err)
         }
-        if err := createScalingGroup(ctx, k8sClient, groupID, groupName, autoscalingGroupName, updatev1alpha1.ControlPlaneRole); err != nil {
+        newScalingGroupConfig := newScalingGroupConfig{k8sClient, groupID, groupName, autoscalingGroupName, updatev1alpha1.ControlPlaneRole}
+        if err := createScalingGroup(ctx, newScalingGroupConfig); err != nil {
             return fmt.Errorf("creating initial control plane scaling group: %w", err)
         }
     }
@@ -65,7 +66,8 @@ func InitialResources(ctx context.Context, k8sClient client.Writer, scalingGroup
         if err != nil {
             return fmt.Errorf("determining autoscaling group name of %q: %w", groupID, err)
         }
-        if err := createScalingGroup(ctx, k8sClient, groupID, groupName, autoscalingGroupName, updatev1alpha1.WorkerRole); err != nil {
+        newScalingGroupConfig := newScalingGroupConfig{k8sClient, groupID, groupName, autoscalingGroupName, updatev1alpha1.WorkerRole}
+        if err := createScalingGroup(ctx, newScalingGroupConfig); err != nil {
             return fmt.Errorf("creating initial worker scaling group: %w", err)
         }
     }
@@ -116,19 +118,19 @@ func createNodeImage(ctx context.Context, k8sClient client.Writer, imageReferenc
 }

 // createScalingGroup creates an initial scaling group resource if it does not exist yet.
-func createScalingGroup(ctx context.Context, k8sClient client.Writer, groupID, groupName, autoscalingGroupName string, role updatev1alpha1.NodeRole) error {
-    err := k8sClient.Create(ctx, &updatev1alpha1.ScalingGroup{
+func createScalingGroup(ctx context.Context, config newScalingGroupConfig) error {
+    err := config.k8sClient.Create(ctx, &updatev1alpha1.ScalingGroup{
         TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "ScalingGroup"},
         ObjectMeta: metav1.ObjectMeta{
-            Name: strings.ToLower(groupName),
+            Name: strings.ToLower(config.groupName),
         },
         Spec: updatev1alpha1.ScalingGroupSpec{
             NodeImage: constants.NodeImageResourceName,
-            GroupID: groupID,
-            AutoscalerGroupName: autoscalingGroupName,
+            GroupID: config.groupID,
+            AutoscalerGroupName: config.autoscalingGroupName,
             Min: 1,
             Max: 10,
-            Role: role,
+            Role: config.role,
         },
     })
     if k8sErrors.IsAlreadyExists(err) {
@@ -149,3 +151,11 @@ type scalingGroupGetter interface {
     // AutoscalingCloudProvider returns the cloud-provider name as used by k8s cluster-autoscaler.
     AutoscalingCloudProvider() string
 }
+
+type newScalingGroupConfig struct {
+    k8sClient client.Writer
+    groupID string
+    groupName string
+    autoscalingGroupName string
+    role updatev1alpha1.NodeRole
+}

@@ -273,7 +273,8 @@ func TestCreateScalingGroup(t *testing.T) {
             require := require.New(t)
             k8sClient := &stubK8sClient{createErr: tc.createErr}
-            err := createScalingGroup(context.Background(), k8sClient, "group-id", "group-Name", "group-Name", updatev1alpha1.WorkerRole)
+            newScalingGroupConfig := newScalingGroupConfig{k8sClient, "group-id", "group-Name", "group-Name", updatev1alpha1.WorkerRole}
+            err := createScalingGroup(context.Background(), newScalingGroupConfig)
             if tc.wantErr {
                 assert.Error(err)
                 return