Use config structs to limit the number of function arguments

This commit is contained in:
Otto Bittner 2022-11-21 18:01:23 +01:00
parent 928fdcff76
commit 048ab94123
4 changed files with 43 additions and 26 deletions

View file

@ -192,7 +192,8 @@ func (r *NodeImageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{Requeue: shouldRequeue}, nil return ctrl.Result{Requeue: shouldRequeue}, nil
} }
if err := r.createNewNodes(ctx, desiredNodeImage, groups.Outdated, pendingNodeList.Items, scalingGroupByID, newNodesBudget); err != nil { newNodeConfig := newNodeConfig{desiredNodeImage, groups.Outdated, pendingNodeList.Items, scalingGroupByID, newNodesBudget}
if err := r.createNewNodes(ctx, newNodeConfig); err != nil {
return ctrl.Result{Requeue: shouldRequeue}, nil return ctrl.Result{Requeue: shouldRequeue}, nil
} }
// cleanup obsolete nodes // cleanup obsolete nodes
@ -508,17 +509,13 @@ func (r *NodeImageReconciler) deleteNode(ctx context.Context, controller metav1.
} }
// createNewNodes creates new nodes using up to date images as replacement for outdated nodes. // createNewNodes creates new nodes using up to date images as replacement for outdated nodes.
func (r *NodeImageReconciler) createNewNodes( func (r *NodeImageReconciler) createNewNodes(ctx context.Context, config newNodeConfig) error {
ctx context.Context, desiredNodeImage updatev1alpha1.NodeImage,
outdatedNodes []corev1.Node, pendingNodes []updatev1alpha1.PendingNode,
scalingGroupByID map[string]updatev1alpha1.ScalingGroup, newNodesBudget int,
) error {
logr := log.FromContext(ctx) logr := log.FromContext(ctx)
if newNodesBudget < 1 || len(outdatedNodes) == 0 { if config.newNodesBudget < 1 || len(config.outdatedNodes) == 0 {
return nil return nil
} }
outdatedNodesPerScalingGroup := make(map[string]int) outdatedNodesPerScalingGroup := make(map[string]int)
for _, node := range outdatedNodes { for _, node := range config.outdatedNodes {
// skip outdated nodes that got assigned an heir in this Reconcile call // skip outdated nodes that got assigned an heir in this Reconcile call
if len(node.Annotations[heirAnnotation]) != 0 { if len(node.Annotations[heirAnnotation]) != 0 {
continue continue
@ -526,7 +523,7 @@ func (r *NodeImageReconciler) createNewNodes(
outdatedNodesPerScalingGroup[strings.ToLower(node.Annotations[scalingGroupAnnotation])]++ outdatedNodesPerScalingGroup[strings.ToLower(node.Annotations[scalingGroupAnnotation])]++
} }
pendingJoiningNodesPerScalingGroup := make(map[string]int) pendingJoiningNodesPerScalingGroup := make(map[string]int)
for _, pendingNode := range pendingNodes { for _, pendingNode := range config.pendingNodes {
// skip pending nodes that are not joining // skip pending nodes that are not joining
if pendingNode.Spec.Goal != updatev1alpha1.NodeGoalJoin { if pendingNode.Spec.Goal != updatev1alpha1.NodeGoalJoin {
continue continue
@ -541,20 +538,20 @@ func (r *NodeImageReconciler) createNewNodes(
} }
} }
for scalingGroupID := range requiredNodesPerScalingGroup { for scalingGroupID := range requiredNodesPerScalingGroup {
scalingGroup, ok := scalingGroupByID[scalingGroupID] scalingGroup, ok := config.scalingGroupByID[scalingGroupID]
if !ok { if !ok {
logr.Info("Scaling group does not have matching resource", "scalingGroup", scalingGroupID, "scalingGroups", scalingGroupByID) logr.Info("Scaling group does not have matching resource", "scalingGroup", scalingGroupID, "scalingGroups", config.scalingGroupByID)
continue continue
} }
if !strings.EqualFold(scalingGroup.Status.ImageReference, desiredNodeImage.Spec.ImageReference) { if !strings.EqualFold(scalingGroup.Status.ImageReference, config.desiredNodeImage.Spec.ImageReference) {
logr.Info("Scaling group does not use latest image", "scalingGroup", scalingGroupID, "usedImage", scalingGroup.Status.ImageReference, "wantedImage", desiredNodeImage.Spec.ImageReference) logr.Info("Scaling group does not use latest image", "scalingGroup", scalingGroupID, "usedImage", scalingGroup.Status.ImageReference, "wantedImage", config.desiredNodeImage.Spec.ImageReference)
continue continue
} }
if requiredNodesPerScalingGroup[scalingGroupID] == 0 { if requiredNodesPerScalingGroup[scalingGroupID] == 0 {
continue continue
} }
for { for {
if newNodesBudget == 0 { if config.newNodesBudget == 0 {
return nil return nil
} }
if requiredNodesPerScalingGroup[scalingGroupID] == 0 { if requiredNodesPerScalingGroup[scalingGroupID] == 0 {
@ -576,7 +573,7 @@ func (r *NodeImageReconciler) createNewNodes(
Deadline: &deadline, Deadline: &deadline,
}, },
} }
if err := ctrl.SetControllerReference(&desiredNodeImage, pendingNode, r.Scheme); err != nil { if err := ctrl.SetControllerReference(&config.desiredNodeImage, pendingNode, r.Scheme); err != nil {
return err return err
} }
if err := r.Create(ctx, pendingNode); err != nil { if err := r.Create(ctx, pendingNode); err != nil {
@ -584,7 +581,7 @@ func (r *NodeImageReconciler) createNewNodes(
} }
logr.Info("Created new node", "createdNode", nodeName, "scalingGroup", scalingGroupID) logr.Info("Created new node", "createdNode", nodeName, "scalingGroup", scalingGroupID)
requiredNodesPerScalingGroup[scalingGroupID]-- requiredNodesPerScalingGroup[scalingGroupID]--
newNodesBudget-- config.newNodesBudget--
} }
} }
return nil return nil
@ -817,3 +814,11 @@ type etcdRemover interface {
// RemoveEtcdMemberFromCluster removes an etcd member from the cluster. // RemoveEtcdMemberFromCluster removes an etcd member from the cluster.
RemoveEtcdMemberFromCluster(ctx context.Context, vpcIP string) error RemoveEtcdMemberFromCluster(ctx context.Context, vpcIP string) error
} }
// newNodeConfig bundles the arguments of NodeImageReconciler.createNewNodes
// into a single struct. Field order is significant: callers construct it with
// a positional composite literal.
type newNodeConfig struct {
// desiredNodeImage is the image that new nodes should run; its
// ImageReference is compared against each scaling group's status, and it
// becomes the controller reference of created PendingNode resources.
desiredNodeImage updatev1alpha1.NodeImage
// outdatedNodes are the nodes to be replaced; nodes that already carry an
// heir annotation are skipped.
outdatedNodes []corev1.Node
// pendingNodes are existing PendingNode resources, counted per scaling
// group when their goal is NodeGoalJoin.
pendingNodes []updatev1alpha1.PendingNode
// scalingGroupByID maps a (lowercased) scaling group ID to its
// ScalingGroup resource.
scalingGroupByID map[string]updatev1alpha1.ScalingGroup
// newNodesBudget is the maximum number of new nodes to create in this
// reconcile pass; creation stops once it reaches zero.
newNodesBudget int
}

View file

@ -590,7 +590,8 @@ func TestCreateNewNodes(t *testing.T) {
}, },
Scheme: getScheme(t), Scheme: getScheme(t),
} }
err := reconciler.createNewNodes(context.Background(), desiredNodeImage, tc.outdatedNodes, tc.pendingNodes, tc.scalingGroupByID, tc.budget) newNodeConfig := newNodeConfig{desiredNodeImage, tc.outdatedNodes, tc.pendingNodes, tc.scalingGroupByID, tc.budget}
err := reconciler.createNewNodes(context.Background(), newNodeConfig)
require.NoError(err) require.NoError(err)
assert.Equal(tc.wantCreateCalls, reconciler.nodeReplacer.(*stubNodeReplacerWriter).createCalls) assert.Equal(tc.wantCreateCalls, reconciler.nodeReplacer.(*stubNodeReplacerWriter).createCalls)
}) })

View file

@ -52,7 +52,8 @@ func InitialResources(ctx context.Context, k8sClient client.Writer, scalingGroup
if err != nil { if err != nil {
return fmt.Errorf("determining autoscaling group name of %q: %w", groupID, err) return fmt.Errorf("determining autoscaling group name of %q: %w", groupID, err)
} }
if err := createScalingGroup(ctx, k8sClient, groupID, groupName, autoscalingGroupName, updatev1alpha1.ControlPlaneRole); err != nil { newScalingGroupConfig := newScalingGroupConfig{k8sClient, groupID, groupName, autoscalingGroupName, updatev1alpha1.ControlPlaneRole}
if err := createScalingGroup(ctx, newScalingGroupConfig); err != nil {
return fmt.Errorf("creating initial control plane scaling group: %w", err) return fmt.Errorf("creating initial control plane scaling group: %w", err)
} }
} }
@ -65,7 +66,8 @@ func InitialResources(ctx context.Context, k8sClient client.Writer, scalingGroup
if err != nil { if err != nil {
return fmt.Errorf("determining autoscaling group name of %q: %w", groupID, err) return fmt.Errorf("determining autoscaling group name of %q: %w", groupID, err)
} }
if err := createScalingGroup(ctx, k8sClient, groupID, groupName, autoscalingGroupName, updatev1alpha1.WorkerRole); err != nil { newScalingGroupConfig := newScalingGroupConfig{k8sClient, groupID, groupName, autoscalingGroupName, updatev1alpha1.WorkerRole}
if err := createScalingGroup(ctx, newScalingGroupConfig); err != nil {
return fmt.Errorf("creating initial worker scaling group: %w", err) return fmt.Errorf("creating initial worker scaling group: %w", err)
} }
} }
@ -116,19 +118,19 @@ func createNodeImage(ctx context.Context, k8sClient client.Writer, imageReferenc
} }
// createScalingGroup creates an initial scaling group resource if it does not exist yet. // createScalingGroup creates an initial scaling group resource if it does not exist yet.
func createScalingGroup(ctx context.Context, k8sClient client.Writer, groupID, groupName, autoscalingGroupName string, role updatev1alpha1.NodeRole) error { func createScalingGroup(ctx context.Context, config newScalingGroupConfig) error {
err := k8sClient.Create(ctx, &updatev1alpha1.ScalingGroup{ err := config.k8sClient.Create(ctx, &updatev1alpha1.ScalingGroup{
TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "ScalingGroup"}, TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "ScalingGroup"},
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: strings.ToLower(groupName), Name: strings.ToLower(config.groupName),
}, },
Spec: updatev1alpha1.ScalingGroupSpec{ Spec: updatev1alpha1.ScalingGroupSpec{
NodeImage: constants.NodeImageResourceName, NodeImage: constants.NodeImageResourceName,
GroupID: groupID, GroupID: config.groupID,
AutoscalerGroupName: autoscalingGroupName, AutoscalerGroupName: config.autoscalingGroupName,
Min: 1, Min: 1,
Max: 10, Max: 10,
Role: role, Role: config.role,
}, },
}) })
if k8sErrors.IsAlreadyExists(err) { if k8sErrors.IsAlreadyExists(err) {
@ -149,3 +151,11 @@ type scalingGroupGetter interface {
// AutoscalingCloudProvider returns the cloud-provider name as used by k8s cluster-autoscaler. // AutoscalingCloudProvider returns the cloud-provider name as used by k8s cluster-autoscaler.
AutoscalingCloudProvider() string AutoscalingCloudProvider() string
} }
// newScalingGroupConfig bundles the arguments of createScalingGroup into a
// single struct. Field order is significant: callers construct it with a
// positional composite literal.
type newScalingGroupConfig struct {
// k8sClient is used to create the ScalingGroup resource.
k8sClient client.Writer
// groupID is the cloud-provider ID of the scaling group (Spec.GroupID).
groupID string
// groupName becomes the resource name (lowercased).
groupName string
// autoscalingGroupName is the group name as used by the cluster-autoscaler
// (Spec.AutoscalerGroupName).
autoscalingGroupName string
// role is the node role of the group (control-plane or worker).
role updatev1alpha1.NodeRole
}

View file

@ -273,7 +273,8 @@ func TestCreateScalingGroup(t *testing.T) {
require := require.New(t) require := require.New(t)
k8sClient := &stubK8sClient{createErr: tc.createErr} k8sClient := &stubK8sClient{createErr: tc.createErr}
err := createScalingGroup(context.Background(), k8sClient, "group-id", "group-Name", "group-Name", updatev1alpha1.WorkerRole) newScalingGroupConfig := newScalingGroupConfig{k8sClient, "group-id", "group-Name", "group-Name", updatev1alpha1.WorkerRole}
err := createScalingGroup(context.Background(), newScalingGroupConfig)
if tc.wantErr { if tc.wantErr {
assert.Error(err) assert.Error(err)
return return