deps: update Go to v1.24.2 (#3750)

* deps: update Go to v1.24.2
* tests: replace context.Background() with t.Context()

---------

Signed-off-by: Daniel Weiße <dw@edgeless.systems>
This commit is contained in:
Daniel Weiße 2025-04-09 10:54:28 +02:00 committed by GitHub
parent a7f9561a3d
commit 4e5c213b4d
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
112 changed files with 287 additions and 316 deletions

View file

@@ -123,7 +123,7 @@ func TestAnnotateNodes(t *testing.T) {
},
},
}
annotated, invalid := reconciler.annotateNodes(context.Background(), []corev1.Node{tc.node})
annotated, invalid := reconciler.annotateNodes(t.Context(), []corev1.Node{tc.node})
if tc.wantAnnotated == nil {
assert.Len(annotated, 0)
assert.Len(invalid, 1)
@@ -226,7 +226,7 @@ func TestPairDonorsAndHeirs(t *testing.T) {
},
}
nodeImage := updatev1alpha1.NodeVersion{}
pairs := reconciler.pairDonorsAndHeirs(context.Background(), &nodeImage, []corev1.Node{tc.outdatedNode}, []mintNode{tc.mintNode})
pairs := reconciler.pairDonorsAndHeirs(t.Context(), &nodeImage, []corev1.Node{tc.outdatedNode}, []mintNode{tc.mintNode})
if tc.wantPair == nil {
assert.Len(pairs, 0)
return
@@ -315,7 +315,7 @@ func TestMatchDonorsAndHeirs(t *testing.T) {
stubReaderClient: *newStubReaderClient(t, []runtime.Object{&tc.donor, &tc.heir}, nil, nil),
},
}
pairs := reconciler.matchDonorsAndHeirs(context.Background(), nil, []corev1.Node{tc.donor}, []corev1.Node{tc.heir})
pairs := reconciler.matchDonorsAndHeirs(t.Context(), nil, []corev1.Node{tc.donor}, []corev1.Node{tc.heir})
if tc.wantPair == nil {
assert.Len(pairs, 0)
return
@@ -693,7 +693,7 @@ func TestCreateNewNodes(t *testing.T) {
Scheme: getScheme(t),
}
newNodeConfig := newNodeConfig{desiredNodeImage, tc.outdatedNodes, tc.donors, tc.pendingNodes, tc.scalingGroupByID, tc.budget}
err := reconciler.createNewNodes(context.Background(), newNodeConfig)
err := reconciler.createNewNodes(t.Context(), newNodeConfig)
require.NoError(err)
assert.Equal(tc.wantCreateCalls, reconciler.nodeReplacer.(*stubNodeReplacerWriter).createCalls)
})

View file

@@ -7,7 +7,6 @@ SPDX-License-Identifier: AGPL-3.0-only
package controllers
import (
"context"
"errors"
"testing"
@@ -250,7 +249,7 @@ func TestFindObjectsForScalingGroup(t *testing.T) {
}
assert := assert.New(t)
reconciler := NodeVersionReconciler{}
requests := reconciler.findObjectsForScalingGroup(context.TODO(), &scalingGroup)
requests := reconciler.findObjectsForScalingGroup(t.Context(), &scalingGroup)
assert.ElementsMatch(wantRequests, requests)
}
@@ -284,7 +283,7 @@ func TestFindAllNodeVersions(t *testing.T) {
reconciler := NodeVersionReconciler{
Client: newStubReaderClient(t, []runtime.Object{tc.nodeVersion}, nil, tc.listNodeVersionsErr),
}
requests := reconciler.findAllNodeVersions(context.TODO(), nil)
requests := reconciler.findAllNodeVersions(t.Context(), nil)
assert.ElementsMatch(tc.wantRequests, requests)
})
}

View file

@@ -137,7 +137,7 @@ func TestFindObjectsForNode(t *testing.T) {
reconciler := PendingNodeReconciler{
Client: newStubReaderClient(t, []runtime.Object{tc.pendingNode}, nil, tc.listPendingNodesErr),
}
requests := reconciler.findObjectsForNode(context.TODO(), &corev1.Node{
requests := reconciler.findObjectsForNode(t.Context(), &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "pending-node",
},
@@ -218,7 +218,7 @@ func TestReachedGoal(t *testing.T) {
reconciler := PendingNodeReconciler{
Client: newStubReaderClient(t, []runtime.Object{&tc.pendingNode}, tc.getPendingNodeErr, nil),
}
reachedGoal, err := reconciler.reachedGoal(context.Background(), tc.pendingNode, tc.nodeState)
reachedGoal, err := reconciler.reachedGoal(t.Context(), tc.pendingNode, tc.nodeState)
if tc.wantErr {
assert.Error(err)
return

View file

@@ -91,7 +91,7 @@ func TestGetNodeImage(t *testing.T) {
describeInstancesErr: tc.describeInstancesErr,
},
}
gotImage, err := client.GetNodeImage(context.Background(), tc.providerID)
gotImage, err := client.GetNodeImage(t.Context(), tc.providerID)
if tc.wantErr {
assert.Error(err)
return
@@ -199,7 +199,7 @@ func TestGetScalingGroupID(t *testing.T) {
describeInstancesErr: tc.describeInstancesErr,
},
}
gotScalingID, err := client.GetScalingGroupID(context.Background(), tc.providerID)
gotScalingID, err := client.GetScalingGroupID(t.Context(), tc.providerID)
if tc.wantErr {
assert.Error(err)
return
@@ -357,7 +357,7 @@ func TestCreateNode(t *testing.T) {
setDesiredCapacityErr: tc.setDesiredCapacityErr,
},
}
nodeName, providerID, err := client.CreateNode(context.Background(), tc.providerID)
nodeName, providerID, err := client.CreateNode(t.Context(), tc.providerID)
if tc.wantErr {
assert.Error(err)
return
@@ -398,7 +398,7 @@ func TestDeleteNode(t *testing.T) {
terminateInstanceErr: tc.terminateInstanceErr,
},
}
err := client.DeleteNode(context.Background(), tc.providerID)
err := client.DeleteNode(t.Context(), tc.providerID)
if tc.wantErr {
assert.Error(err)
return

View file

@@ -7,7 +7,6 @@ SPDX-License-Identifier: AGPL-3.0-only
package client
import (
"context"
"errors"
"testing"
@@ -161,7 +160,7 @@ func TestGetNodeState(t *testing.T) {
describeInstanceStatusErr: tc.describeInstanceStatusErr,
},
}
nodeState, err := client.GetNodeState(context.Background(), tc.providerID)
nodeState, err := client.GetNodeState(t.Context(), tc.providerID)
assert.Equal(tc.wantState, nodeState)
if tc.wantErr {
assert.Error(err)

View file

@@ -7,7 +7,6 @@ SPDX-License-Identifier: AGPL-3.0-only
package client
import (
"context"
"testing"
"github.com/aws/aws-sdk-go-v2/service/autoscaling"
@@ -91,7 +90,7 @@ func TestGetScalingGroupImage(t *testing.T) {
},
},
}
scalingGroupImage, err := client.GetScalingGroupImage(context.Background(), tc.providerID)
scalingGroupImage, err := client.GetScalingGroupImage(t.Context(), tc.providerID)
if tc.wantErr {
assert.Error(err)
return
@@ -216,7 +215,7 @@ func TestSetScalingGroupImage(t *testing.T) {
},
},
}
err := client.SetScalingGroupImage(context.Background(), tc.providerID, tc.imageURI)
err := client.SetScalingGroupImage(t.Context(), tc.providerID, tc.imageURI)
if tc.wantErr {
assert.Error(err)
return
@@ -319,7 +318,7 @@ func TestListScalingGroups(t *testing.T) {
describeAutoScalingGroupsErr: tc.describeAutoScalingGroupsErr,
},
}
gotGroups, err := client.ListScalingGroups(context.Background(), tc.providerID)
gotGroups, err := client.ListScalingGroups(t.Context(), tc.providerID)
if tc.wantErr {
assert.Error(err)
return

View file

@@ -98,7 +98,7 @@ func TestGetNodeImage(t *testing.T) {
getErr: tc.getScaleSetVMErr,
},
}
gotImage, err := client.GetNodeImage(context.Background(), tc.providerID)
gotImage, err := client.GetNodeImage(t.Context(), tc.providerID)
if tc.wantErr {
assert.Error(err)
return
@@ -131,7 +131,7 @@ func TestGetScalingGroupID(t *testing.T) {
require := require.New(t)
client := Client{}
gotScalingGroupID, err := client.GetScalingGroupID(context.Background(), tc.providerID)
gotScalingGroupID, err := client.GetScalingGroupID(t.Context(), tc.providerID)
if tc.wantErr {
assert.Error(err)
return
@@ -262,7 +262,7 @@ func TestCreateNode(t *testing.T) {
var createErr error
go func() {
defer wg.Done()
gotNodeName, gotProviderID, createErr = client.CreateNode(context.Background(), tc.scalingGroupID)
gotNodeName, gotProviderID, createErr = client.CreateNode(t.Context(), tc.scalingGroupID)
}()
// want error before PollUntilDone is called
@@ -319,7 +319,7 @@ func TestDeleteNode(t *testing.T) {
client := Client{
scaleSetsAPI: &stubScaleSetsAPI{deleteErr: tc.deleteErr},
}
err := client.DeleteNode(context.Background(), tc.providerID)
err := client.DeleteNode(t.Context(), tc.providerID)
if tc.wantErr {
assert.Error(err)
return
@@ -343,25 +343,25 @@ func TestCapacityPollingHandler(t *testing.T) {
},
wantedCapacity: wantCapacity,
}
assert.NoError(handler.Poll(context.Background()))
assert.NoError(handler.Poll(t.Context()))
assert.False(handler.Done())
// Calling Result early should error
assert.Error(handler.Result(context.Background(), &gotCapacity))
assert.Error(handler.Result(t.Context(), &gotCapacity))
// let scaleSet API error
handler.scaleSetsAPI.(*stubScaleSetsAPI).getErr = errors.New("get error")
assert.Error(handler.Poll(context.Background()))
assert.Error(handler.Poll(t.Context()))
handler.scaleSetsAPI.(*stubScaleSetsAPI).getErr = nil
// let scaleSet API return invalid SKU
handler.scaleSetsAPI.(*stubScaleSetsAPI).scaleSet.SKU = nil
assert.Error(handler.Poll(context.Background()))
assert.Error(handler.Poll(t.Context()))
// let Poll finish
handler.scaleSetsAPI.(*stubScaleSetsAPI).scaleSet.SKU = &armcompute.SKU{Capacity: to.Ptr(wantCapacity)}
assert.NoError(handler.Poll(context.Background()))
assert.NoError(handler.Poll(t.Context()))
assert.True(handler.Done())
assert.NoError(handler.Result(context.Background(), &gotCapacity))
assert.NoError(handler.Result(t.Context(), &gotCapacity))
assert.Equal(wantCapacity, gotCapacity)
}

View file

@@ -7,7 +7,6 @@ SPDX-License-Identifier: AGPL-3.0-only
package client
import (
"context"
"errors"
"net/http"
"testing"
@@ -67,7 +66,7 @@ func TestGetNodeState(t *testing.T) {
instanceViewErr: tc.getInstanceViewErr,
},
}
gotState, err := client.GetNodeState(context.Background(), tc.providerID)
gotState, err := client.GetNodeState(t.Context(), tc.providerID)
if tc.wantErr {
assert.Error(err)
return

View file

@@ -7,7 +7,6 @@ SPDX-License-Identifier: AGPL-3.0-only
package client
import (
"context"
"errors"
"testing"
@@ -103,7 +102,7 @@ func TestGetScalingGroupImage(t *testing.T) {
getErr: tc.getScaleSetErr,
},
}
gotImage, err := client.GetScalingGroupImage(context.Background(), tc.scalingGroupID)
gotImage, err := client.GetScalingGroupImage(t.Context(), tc.scalingGroupID)
if tc.wantErr {
assert.Error(err)
return
@@ -155,7 +154,7 @@ func TestSetScalingGroupImage(t *testing.T) {
resultErr: tc.resultErr,
},
}
err := client.SetScalingGroupImage(context.Background(), tc.scalingGroupID, tc.imageURI)
err := client.SetScalingGroupImage(t.Context(), tc.scalingGroupID, tc.imageURI)
if tc.wantErr {
assert.Error(err)
return
@@ -291,7 +290,7 @@ func TestListScalingGroups(t *testing.T) {
},
},
}
gotGroups, err := client.ListScalingGroups(context.Background(), "uid")
gotGroups, err := client.ListScalingGroups(t.Context(), "uid")
if tc.wantErr {
assert.Error(err)
return

View file

@@ -7,7 +7,6 @@ SPDX-License-Identifier: AGPL-3.0-only
package client
import (
"context"
"errors"
"math/rand"
"testing"
@@ -101,7 +100,7 @@ func TestGetNodeImage(t *testing.T) {
disk: tc.disk,
},
}
gotImage, err := client.GetNodeImage(context.Background(), tc.providerID)
gotImage, err := client.GetNodeImage(t.Context(), tc.providerID)
if tc.wantErr {
assert.Error(err)
return
@@ -162,7 +161,7 @@ func TestGetScalingGroupID(t *testing.T) {
instance: &instance,
},
}
gotScalingGroupID, err := client.GetScalingGroupID(context.Background(), tc.providerID)
gotScalingGroupID, err := client.GetScalingGroupID(t.Context(), tc.providerID)
if tc.wantErr {
assert.Error(err)
return
@@ -221,7 +220,7 @@ func TestCreateNode(t *testing.T) {
},
prng: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
}
instanceName, providerID, err := client.CreateNode(context.Background(), tc.scalingGroupID)
instanceName, providerID, err := client.CreateNode(t.Context(), tc.scalingGroupID)
if tc.wantErr {
assert.Error(err)
return
@@ -287,7 +286,7 @@ func TestDeleteNode(t *testing.T) {
},
},
}
err := client.DeleteNode(context.Background(), tc.providerID)
err := client.DeleteNode(t.Context(), tc.providerID)
if tc.wantErr {
assert.Error(err)
return

View file

@@ -7,7 +7,6 @@ SPDX-License-Identifier: AGPL-3.0-only
package client
import (
"context"
"errors"
"net/http"
"testing"
@@ -108,7 +107,7 @@ func TestGetNodeState(t *testing.T) {
},
},
}
nodeState, err := client.GetNodeState(context.Background(), tc.providerID)
nodeState, err := client.GetNodeState(t.Context(), tc.providerID)
if tc.wantErr {
assert.Error(err)
return

View file

@@ -7,7 +7,6 @@ SPDX-License-Identifier: AGPL-3.0-only
package client
import (
"context"
"errors"
"testing"
@@ -57,7 +56,7 @@ func TestCanonicalProjectID(t *testing.T) {
getErr: tc.getProjectErr,
},
}
gotID, err := client.canonicalProjectID(context.Background(), tc.projectID)
gotID, err := client.canonicalProjectID(t.Context(), tc.projectID)
if tc.wantErr {
assert.Error(err)
return

View file

@@ -7,7 +7,6 @@ SPDX-License-Identifier: AGPL-3.0-only
package client
import (
"context"
"errors"
"testing"
@@ -96,7 +95,7 @@ func TestGetScalingGroupImage(t *testing.T) {
template: tc.instanceTemplate,
},
}
gotImage, err := client.GetScalingGroupImage(context.Background(), tc.scalingGroupID)
gotImage, err := client.GetScalingGroupImage(t.Context(), tc.scalingGroupID)
if tc.wantErr {
assert.Error(err)
return
@@ -281,7 +280,7 @@ func TestSetScalingGroupImage(t *testing.T) {
template: tc.instanceTemplate,
},
}
err := client.SetScalingGroupImage(context.Background(), tc.scalingGroupID, tc.imageURI)
err := client.SetScalingGroupImage(t.Context(), tc.scalingGroupID, tc.imageURI)
if tc.wantErr {
assert.Error(err)
return
@@ -456,7 +455,7 @@ func TestListScalingGroups(t *testing.T) {
getErr: tc.templateGetErr,
},
}
gotGroups, err := client.ListScalingGroups(context.Background(), "uid")
gotGroups, err := client.ListScalingGroups(t.Context(), "uid")
if tc.wantErr {
assert.Error(err)
return

View file

@@ -86,7 +86,7 @@ func TestInitialResources(t *testing.T) {
},
}
scalingGroupGetter := newScalingGroupGetter(tc.items, tc.imageErr, tc.nameErr, tc.listErr)
err := InitialResources(context.Background(), k8sClient, &stubImageInfo{}, scalingGroupGetter, "uid")
err := InitialResources(t.Context(), k8sClient, &stubImageInfo{}, scalingGroupGetter, "uid")
if tc.wantErr {
assert.Error(err)
return
@@ -156,7 +156,7 @@ func TestCreateAutoscalingStrategy(t *testing.T) {
require := require.New(t)
k8sClient := &fakeK8sClient{createErr: tc.createErr}
err := createAutoscalingStrategy(context.Background(), k8sClient, "stub")
err := createAutoscalingStrategy(t.Context(), k8sClient, "stub")
if tc.wantErr {
assert.Error(err)
return
@@ -246,7 +246,7 @@ func TestCreateNodeVersion(t *testing.T) {
if tc.existingNodeVersion != nil {
k8sClient.createdObjects = append(k8sClient.createdObjects, tc.existingNodeVersion)
}
err := createNodeVersion(context.Background(), k8sClient, "image-reference", "image-version")
err := createNodeVersion(t.Context(), k8sClient, "image-reference", "image-version")
if tc.wantErr {
assert.Error(err)
return

View file

@@ -54,7 +54,7 @@ func TestRemoveEtcdMemberFromCluster(t *testing.T) {
},
listErr: tc.memberListErr,
}}
err := client.RemoveEtcdMemberFromCluster(context.Background(), tc.vpcIP)
err := client.RemoveEtcdMemberFromCluster(t.Context(), tc.vpcIP)
if tc.wantErr {
assert.Error(err)
return
@@ -98,7 +98,7 @@ func TestGetMemberID(t *testing.T) {
members: tc.members,
listErr: tc.memberListErr,
}}
gotMemberID, err := client.getMemberID(context.Background(), "192.0.2.1")
gotMemberID, err := client.getMemberID(t.Context(), "192.0.2.1")
if tc.wantErr {
assert.Error(err)
return

View file

@@ -29,7 +29,7 @@ func TestStartTriggersImmediateReconciliation(t *testing.T) {
}
exec := New(ctrl, cfg)
// on start, the executor should trigger a reconciliation
stopAndWait := exec.Start(context.Background())
stopAndWait := exec.Start(t.Context())
<-ctrl.waitUntilReconciled // makes sure to wait until reconcile was called
ctrl.stop <- struct{}{}
@@ -48,10 +48,10 @@ func TestStartMultipleTimesIsCoalesced(t *testing.T) {
}
exec := New(ctrl, cfg)
// start once
stopAndWait := exec.Start(context.Background())
stopAndWait := exec.Start(t.Context())
// start again multiple times
for i := 0; i < 10; i++ {
_ = exec.Start(context.Background())
_ = exec.Start(t.Context())
}
<-ctrl.waitUntilReconciled // makes sure to wait until reconcile was called
@@ -72,7 +72,7 @@ func TestErrorTriggersImmediateReconciliation(t *testing.T) {
RateLimiter: &stubRateLimiter{}, // no rate limiting
}
exec := New(ctrl, cfg)
stopAndWait := exec.Start(context.Background())
stopAndWait := exec.Start(t.Context())
for i := 0; i < 10; i++ {
<-ctrl.waitUntilReconciled // makes sure to wait until reconcile was called
}
@@ -96,7 +96,7 @@ func TestErrorTriggersRateLimiting(t *testing.T) {
},
}
exec := New(ctrl, cfg)
stopAndWait := exec.Start(context.Background())
stopAndWait := exec.Start(t.Context())
<-ctrl.waitUntilReconciled // makes sure to wait until reconcile was called once to trigger rate limiting
ctrl.stop <- struct{}{}
@@ -120,7 +120,7 @@ func TestRequeueAfterResultRequeueInterval(t *testing.T) {
},
}
exec := New(ctrl, cfg)
stopAndWait := exec.Start(context.Background())
stopAndWait := exec.Start(t.Context())
for i := 0; i < 10; i++ {
<-ctrl.waitUntilReconciled // makes sure to wait until reconcile was called
}
@@ -143,7 +143,7 @@ func TestExternalTrigger(t *testing.T) {
},
}
exec := New(ctrl, cfg)
stopAndWait := exec.Start(context.Background())
stopAndWait := exec.Start(t.Context())
<-ctrl.waitUntilReconciled // initial trigger
for i := 0; i < 10; i++ {
exec.Trigger()
@@ -167,7 +167,7 @@ func TestSimultaneousExternalTriggers(t *testing.T) {
},
}
exec := New(ctrl, cfg)
stopAndWait := exec.Start(context.Background())
stopAndWait := exec.Start(t.Context())
<-ctrl.waitUntilReconciled // initial trigger
for i := 0; i < 100; i++ {
exec.Trigger() // extra trigger calls are coalesced
@@ -184,7 +184,7 @@ func TestSimultaneousExternalTriggers(t *testing.T) {
func TestContextCancel(t *testing.T) {
assert := assert.New(t)
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
ctrl := newStubController(Result{}, nil)
cfg := Config{
PollingFrequency: time.Hour * 24 * 365, // 1 year. Should be high enough to not trigger the timer in the test.
@@ -219,7 +219,7 @@ func TestRequeueAfterPollingFrequency(t *testing.T) {
},
}
exec := New(ctrl, cfg)
stopAndWait := exec.Start(context.Background())
stopAndWait := exec.Start(t.Context())
for i := 0; i < 10; i++ {
<-ctrl.waitUntilReconciled // makes sure to wait until reconcile was called
}

View file

@@ -49,17 +49,17 @@ func TestResult(t *testing.T) {
pollErr: tc.pollErr,
resultErr: tc.resultErr,
})
_, firstErr := poller.Result(context.Background())
_, firstErr := poller.Result(t.Context())
if tc.wantErr {
assert.Error(firstErr)
// calling Result again should return the same error
_, secondErr := poller.Result(context.Background())
_, secondErr := poller.Result(t.Context())
assert.Equal(firstErr, secondErr)
return
}
assert.NoError(firstErr)
// calling Result again should still not return an error
_, secondErr := poller.Result(context.Background())
_, secondErr := poller.Result(t.Context())
assert.NoError(secondErr)
})
}
@@ -136,7 +136,7 @@ func TestPollUntilDone(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
gotResult, gotErr = poller.PollUntilDone(context.Background(), &PollUntilDoneOptions{
gotResult, gotErr = poller.PollUntilDone(t.Context(), &PollUntilDoneOptions{
MaxBackoff: tc.maxBackoff,
Clock: clock,
})

View file

@@ -40,7 +40,7 @@ func TestGRPCDialer(t *testing.T) {
require.Equal(os.ModeSocket, fileInfo.Mode()&os.ModeType)
upgradeClient := newClientWithAddress(sockAddr)
require.NoError(upgradeClient.Upgrade(context.Background(), []*components.Component{}, "v1.29.6"))
require.NoError(upgradeClient.Upgrade(t.Context(), []*components.Component{}, "v1.29.6"))
}
type fakeUpgradeAgent struct {

View file

@@ -88,7 +88,7 @@ func TestCreateScalingGroupIfNotExists(t *testing.T) {
autoscalingGroupName: "autoscaling-group-name",
role: updatev1alpha1.WorkerRole,
}
err := createScalingGroupIfNotExists(context.Background(), newScalingGroupConfig)
err := createScalingGroupIfNotExists(t.Context(), newScalingGroupConfig)
if tc.wantErr {
assert.Error(err)
return
@@ -184,7 +184,7 @@ func TestPatchNodeGroupName(t *testing.T) {
getErr: tc.getErr,
updateErr: tc.updateErr,
}
gotExists, gotErr := patchNodeGroupName(context.Background(), k8sClient, "resource-name", "node-group-name")
gotExists, gotErr := patchNodeGroupName(t.Context(), k8sClient, "resource-name", "node-group-name")
if tc.wantErr {
assert.Error(gotErr)
return