Mirror of https://github.com/edgelesssys/constellation.git, synced 2025-01-23 05:41:19 -05:00
[node operator] PendingNode controller env test
Signed-off-by: Malte Poll <mp@edgeless.systems>
This commit is contained in:
parent 19568d400b
commit 614447495d
@@ -0,0 +1,185 @@
package controllers

import (
	"context"
	"net/http"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	updatev1alpha1 "github.com/edgelesssys/constellation/operators/constellation-node-operator/api/v1alpha1"
)

var _ = Describe("PendingNode controller", func() {
	// Define utility constants for object names and testing timeouts/durations and intervals.
	const (
		pendingNodeName = "pending-node"

		timeout  = time.Second * 10
		duration = time.Second * 2
		interval = time.Millisecond * 250
	)

	pendingNodeLookupKey := types.NamespacedName{Name: pendingNodeName}

	Context("When creating pending node with goal join", func() {
		It("Should terminate the node after failing to join by the deadline", func() {
			By("setting the CSP node state to creating")
			fakes.nodeStateGetter.setNodeState(updatev1alpha1.NodeStateCreating)

			By("creating a pending node resource")
			ctx := context.Background()
			pendingNode := &updatev1alpha1.PendingNode{
				TypeMeta: metav1.TypeMeta{
					APIVersion: "update.edgeless.systems/v1alpha1",
					Kind:       "PendingNode",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name: pendingNodeName,
				},
				Spec: updatev1alpha1.PendingNodeSpec{
					ProviderID:     "provider-id",
					ScalingGroupID: "scaling-group-id",
					NodeName:       "test-node",
					Goal:           updatev1alpha1.NodeGoalJoin,
					// create without deadline first
				},
			}
			Expect(k8sClient.Create(ctx, pendingNode)).Should(Succeed())
			createdPendingNode := &updatev1alpha1.PendingNode{}
			Eventually(func() bool {
				if err := k8sClient.Get(ctx, pendingNodeLookupKey, createdPendingNode); err != nil {
					return false
				}
				return true
			}, timeout, interval).Should(BeTrue())
			Expect(createdPendingNode.Spec.NodeName).Should(Equal("test-node"))

			By("checking the pending node state is creating")
			Eventually(func() updatev1alpha1.CSPNodeState {
				if err := k8sClient.Get(ctx, pendingNodeLookupKey, createdPendingNode); err != nil {
					return ""
				}
				return createdPendingNode.Status.CSPNodeState
			}, timeout, interval).Should(Equal(updatev1alpha1.NodeStateCreating))

			By("updating the deadline to be in the past")
			deadline := fakes.clock.Now().Add(-time.Second)
			Expect(k8sClient.Get(ctx, pendingNodeLookupKey, pendingNode)).Should(Succeed())
			pendingNode.Spec.Deadline = &metav1.Time{Time: deadline}
			Expect(k8sClient.Update(ctx, pendingNode)).Should(Succeed())

			By("checking the pending node updates its goal")
			Eventually(func() updatev1alpha1.PendingNodeGoal {
				if err := k8sClient.Get(ctx, pendingNodeLookupKey, createdPendingNode); err != nil {
					return ""
				}
				return createdPendingNode.Spec.Goal
			}, timeout, interval).Should(Equal(updatev1alpha1.NodeGoalLeave))

			By("setting the CSP node state to terminated")
			fakes.nodeStateGetter.setNodeState(updatev1alpha1.NodeStateTerminated)
			// trigger reconciliation before regular check interval to speed up test by changing the spec
			Expect(k8sClient.Get(ctx, pendingNodeLookupKey, pendingNode)).Should(Succeed())
			pendingNode.Spec.Deadline = &metav1.Time{Time: fakes.clock.Now().Add(time.Second)}
			Expect(k8sClient.Update(ctx, pendingNode)).Should(Succeed())

			By("checking if the pending node resource is deleted")
			Eventually(func() error {
				return k8sClient.Get(ctx, pendingNodeLookupKey, createdPendingNode)
			}, timeout, interval).Should(MatchError(&errors.StatusError{
				ErrStatus: metav1.Status{
					Status:  "Failure",
					Message: `pendingnodes.update.edgeless.systems "pending-node" not found`,
					Reason:  "NotFound",
					Details: &metav1.StatusDetails{
						Name:  pendingNodeName,
						Group: "update.edgeless.systems",
						Kind:  "pendingnodes",
					},
					Code: http.StatusNotFound,
				},
			}))
		})

It("Should should detect successful node join", func() {
|
||||
By("setting the CSP node state to creating")
|
||||
fakes.nodeStateGetter.setNodeState(updatev1alpha1.NodeStateCreating)
|
||||
|
||||
By("creating a pending node resource")
|
||||
ctx := context.Background()
|
||||
pendingNode := &updatev1alpha1.PendingNode{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "update.edgeless.systems/v1alpha1",
|
||||
Kind: "PendingNode",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: pendingNodeName,
|
||||
},
|
||||
Spec: updatev1alpha1.PendingNodeSpec{
|
||||
ProviderID: "provider-id",
|
||||
ScalingGroupID: "scaling-group-id",
|
||||
NodeName: "test-node",
|
||||
Goal: updatev1alpha1.NodeGoalJoin,
|
||||
// deadline is always one second in the future
|
||||
Deadline: &metav1.Time{Time: fakes.clock.Now().Add(time.Second)},
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, pendingNode)).Should(Succeed())
|
||||
createdPendingNode := &updatev1alpha1.PendingNode{}
|
||||
Eventually(func() bool {
|
||||
if err := k8sClient.Get(ctx, pendingNodeLookupKey, createdPendingNode); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, timeout, interval).Should(BeTrue())
|
||||
Expect(createdPendingNode.Spec.NodeName).Should(Equal("test-node"))
|
||||
|
||||
By("checking the pending node state is creating")
|
||||
Eventually(func() updatev1alpha1.CSPNodeState {
|
||||
if err := k8sClient.Get(ctx, pendingNodeLookupKey, createdPendingNode); err != nil {
|
||||
return ""
|
||||
}
|
||||
return createdPendingNode.Status.CSPNodeState
|
||||
}, timeout, interval).Should(Equal(updatev1alpha1.NodeStateCreating))
|
||||
|
||||
By("setting the CSP node state to ready")
|
||||
fakes.nodeStateGetter.setNodeState(updatev1alpha1.NodeStateReady)
|
||||
|
||||
By("creating a new node resource with the same node name and provider ID")
|
||||
node := &corev1.Node{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "Node",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-node",
|
||||
},
|
||||
Spec: corev1.NodeSpec{
|
||||
ProviderID: "provider-id",
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, node)).Should(Succeed())
|
||||
|
||||
By("checking the pending node goal has been reached")
|
||||
Eventually(func() updatev1alpha1.PendingNodeStatus {
|
||||
if err := k8sClient.Get(ctx, pendingNodeLookupKey, createdPendingNode); err != nil {
|
||||
return updatev1alpha1.PendingNodeStatus{}
|
||||
}
|
||||
return createdPendingNode.Status
|
||||
}, timeout, interval).Should(Equal(updatev1alpha1.PendingNodeStatus{
|
||||
CSPNodeState: updatev1alpha1.NodeStateReady,
|
||||
ReachedGoal: true,
|
||||
}))
|
||||
|
||||
By("cleaning up all resources")
|
||||
Expect(k8sClient.Delete(ctx, pendingNode)).Should(Succeed())
|
||||
Expect(k8sClient.Delete(ctx, node)).Should(Succeed())
|
||||
})
|
||||
})
|
||||
})
|
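Note on the fakes: the tests above flip CSP node states through fakes.nodeStateGetter.setNodeState(...), but the stub's definition is not part of this diff. The following is only a hypothetical minimal sketch of such a stub; the GetNodeState method name and signature are assumptions for illustration, not the repository's actual interface.

// Hypothetical sketch (not part of this commit) of the node-state stub used as
// fakes.nodeStateGetter above. Method name and signature of GetNodeState are assumed.
package controllers

import (
	"context"
	"sync"

	updatev1alpha1 "github.com/edgelesssys/constellation/operators/constellation-node-operator/api/v1alpha1"
)

type stubNodeStateGetter struct {
	mux          sync.Mutex
	nodeState    updatev1alpha1.CSPNodeState
	nodeStateErr error
}

// setNodeState sets the canned CSP state that the reconciler will observe next.
func (s *stubNodeStateGetter) setNodeState(state updatev1alpha1.CSPNodeState) {
	s.mux.Lock()
	defer s.mux.Unlock()
	s.nodeState = state
}

// GetNodeState returns the canned state regardless of provider ID (assumed interface).
func (s *stubNodeStateGetter) GetNodeState(ctx context.Context, providerID string) (updatev1alpha1.CSPNodeState, error) {
	s.mux.Lock()
	defer s.mux.Unlock()
	return s.nodeState, s.nodeStateErr
}

The suite changes below wire this stub (together with a fake clock) into the PendingNodeReconciler, so each setNodeState call changes what the controller sees on its next reconcile.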
@@ -5,6 +5,7 @@ import (
	"context"
	"path/filepath"
	"testing"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
@@ -12,6 +13,7 @@ import (
	. "github.com/onsi/gomega"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	testclock "k8s.io/utils/clock/testing"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
	"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
@@ -85,6 +87,14 @@ var _ = BeforeSuite(func() {
	}).SetupWithManager(k8sManager)
	Expect(err).ToNot(HaveOccurred())

	err = (&PendingNodeReconciler{
		nodeStateGetter: fakes.nodeStateGetter,
		Client:          k8sManager.GetClient(),
		Scheme:          k8sManager.GetScheme(),
		Clock:           fakes.clock,
	}).SetupWithManager(k8sManager)
	Expect(err).ToNot(HaveOccurred())

	go func() {
		defer GinkgoRecover()
		err = k8sManager.Start(ctx)
@@ -101,10 +111,14 @@ var _ = AfterSuite(func() {

type fakeCollection struct {
	scalingGroupUpdater *fakeScalingGroupUpdater
	nodeStateGetter     *stubNodeStateGetter
	clock               *testclock.FakeClock
}

func newFakes() fakeCollection {
	return fakeCollection{
		scalingGroupUpdater: newFakeScalingGroupUpdater(),
		nodeStateGetter:     &stubNodeStateGetter{},
		clock:               testclock.NewFakeClock(time.Now()),
	}
}