/*
Copyright (c) Edgeless Systems GmbH

SPDX-License-Identifier: AGPL-3.0-only
*/
package controllers
import (
"context"
2023-01-06 06:08:25 -05:00
"encoding/json"
"errors"
2022-06-29 08:48:40 -04:00
"reflect"
2022-08-05 06:17:53 -04:00
"strings"
2022-06-29 08:48:40 -04:00
"time"
2022-06-27 04:58:09 -04:00
2023-01-04 17:21:05 -05:00
mainconstants "github.com/edgelesssys/constellation/v2/internal/constants"
2023-01-06 06:08:25 -05:00
"github.com/edgelesssys/constellation/v2/internal/versions/components"
2023-01-04 13:04:28 -05:00
nodeutil "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/internal/node"
"github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/internal/patch"
2023-01-06 06:08:25 -05:00
"golang.org/x/mod/semver"
2022-06-29 08:48:40 -04:00
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2022-06-27 04:58:09 -04:00
"k8s.io/apimachinery/pkg/runtime"
2022-06-29 08:48:40 -04:00
"k8s.io/apimachinery/pkg/types"
2023-01-06 06:08:25 -05:00
"k8s.io/apimachinery/pkg/version"
2022-06-29 08:48:40 -04:00
ref "k8s.io/client-go/tools/reference"
"k8s.io/client-go/util/retry"
2022-06-27 04:58:09 -04:00
ctrl "sigs.k8s.io/controller-runtime"
2022-06-29 08:48:40 -04:00
"sigs.k8s.io/controller-runtime/pkg/builder"
2022-06-27 04:58:09 -04:00
"sigs.k8s.io/controller-runtime/pkg/client"
2022-06-29 08:48:40 -04:00
"sigs.k8s.io/controller-runtime/pkg/handler"
2022-06-27 04:58:09 -04:00
"sigs.k8s.io/controller-runtime/pkg/log"
2022-06-29 08:48:40 -04:00
"sigs.k8s.io/controller-runtime/pkg/source"
2022-06-27 04:58:09 -04:00
2023-01-04 13:04:28 -05:00
updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/v2/api/v1alpha1"
2022-06-29 08:48:40 -04:00
nodemaintenancev1beta1 "github.com/medik8s/node-maintenance-operator/api/v1beta1"
)
const (
	// nodeOverprovisionLimit is the maximum number of extra nodes created during the update procedure at any point in time.
	nodeOverprovisionLimit = 1
	// nodeJoinTimeout is the time limit pending nodes have to join the cluster before being terminated.
	nodeJoinTimeout = time.Minute * 30
	// nodeLeaveTimeout is the time limit pending nodes have to leave the cluster and be terminated.
	nodeLeaveTimeout = time.Minute
	// donorAnnotation is set on an heir node; its value is the name of the outdated (donor) node it replaces.
	donorAnnotation = "constellation.edgeless.systems/donor"
	// heirAnnotation is set on an outdated (donor) node; its value is the name of the up-to-date (heir) node replacing it.
	heirAnnotation = "constellation.edgeless.systems/heir"
	// scalingGroupAnnotation holds the ID of the CSP scaling group the node belongs to.
	scalingGroupAnnotation = "constellation.edgeless.systems/scaling-group-id"
	// nodeImageAnnotation holds the OS image reference the node is using.
	nodeImageAnnotation = "constellation.edgeless.systems/node-image"
	// obsoleteAnnotation marks a node that is no longer needed and is scheduled for deletion.
	obsoleteAnnotation = "constellation.edgeless.systems/obsolete"
	// conditionNodeVersionUpToDate* describe the status condition when every node runs the desired version.
	conditionNodeVersionUpToDateReason  = "NodeVersionsUpToDate"
	conditionNodeVersionUpToDateMessage = "Node version of every node is up to date"
	// conditionNodeVersionOutOfDate* describe the status condition when some nodes are outdated.
	conditionNodeVersionOutOfDateReason  = "NodeVersionsOutOfDate"
	conditionNodeVersionOutOfDateMessage = "Some node versions are out of date"
)
// NodeVersionReconciler reconciles a NodeVersion object.
type NodeVersionReconciler struct {
	// nodeReplacer looks up and replaces nodes at the CSP (scaling group IDs, node images).
	nodeReplacer
	// etcdRemover removes the etcd member of a control-plane node before it is deleted.
	etcdRemover
	// clusterUpgrader talks to the upgrade-agent to start Kubernetes cluster upgrades.
	clusterUpgrader
	// kubernetesServerVersionGetter reports the currently running Kubernetes server version.
	kubernetesServerVersionGetter
	client.Client
	Scheme *runtime.Scheme
}
2023-01-03 06:09:53 -05:00
// NewNodeVersionReconciler creates a new NodeVersionReconciler.
2023-01-06 06:08:25 -05:00
func NewNodeVersionReconciler ( nodeReplacer nodeReplacer , etcdRemover etcdRemover , clusterUpgrader clusterUpgrader , k8sVerGetter kubernetesServerVersionGetter , client client . Client , scheme * runtime . Scheme ) * NodeVersionReconciler {
2023-01-03 06:09:53 -05:00
return & NodeVersionReconciler {
2023-01-06 06:08:25 -05:00
nodeReplacer : nodeReplacer ,
etcdRemover : etcdRemover ,
clusterUpgrader : clusterUpgrader ,
kubernetesServerVersionGetter : k8sVerGetter ,
Client : client ,
Scheme : scheme ,
2022-06-29 08:48:40 -04:00
}
}
2023-01-03 06:09:53 -05:00
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=nodeversions,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=nodeversions/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=nodeversions/finalizers,verbs=update
2022-07-20 04:53:03 -04:00
//+kubebuilder:rbac:groups=nodemaintenance.medik8s.io,resources=nodemaintenances,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=nodes/status,verbs=get
2023-01-06 06:08:25 -05:00
//+kubebuilder:rbac:groups="",resources=configmaps,verbs=list;get
2022-06-27 04:58:09 -04:00
2023-01-03 06:09:53 -05:00
// Reconcile replaces outdated nodes (using an old image) with new nodes (using a new image) as specified in the NodeVersion spec.
func ( r * NodeVersionReconciler ) Reconcile ( ctx context . Context , req ctrl . Request ) ( ctrl . Result , error ) {
2022-06-29 08:48:40 -04:00
logr := log . FromContext ( ctx )
2023-01-03 06:09:53 -05:00
logr . Info ( "Reconciling NodeVersion" )
2022-06-29 08:48:40 -04:00
2023-01-03 06:09:53 -05:00
var desiredNodeVersion updatev1alpha1 . NodeVersion
if err := r . Get ( ctx , req . NamespacedName , & desiredNodeVersion ) ; err != nil {
2022-06-29 08:48:40 -04:00
return ctrl . Result { } , client . IgnoreNotFound ( err )
}
2023-01-06 06:08:25 -05:00
// Check if we need to upgrade the cluster version.
serverVer , err := r . ServerVersion ( )
if err != nil {
return ctrl . Result { } , err
}
// GitVersion is the semantic version of the Kubernetes server e.g. "v1.24.9"
if semver . Compare ( serverVer . GitVersion , desiredNodeVersion . Spec . KubernetesClusterVersion ) != 0 {
r . tryStartClusterVersionUpgrade ( ctx , req . NamespacedName )
}
2022-06-29 08:48:40 -04:00
// get list of autoscaling strategies
// there should be exactly one autoscaling strategy but we do not specify its name.
// if there is no autoscaling strategy, it is assumed that autoscaling is disabled.
var autoscalingStrategiesList updatev1alpha1 . AutoscalingStrategyList
if err := r . List ( ctx , & autoscalingStrategiesList ) ; err != nil {
return ctrl . Result { } , err
}
var autoscalingEnabled bool
for _ , autoscalingStrategy := range autoscalingStrategiesList . Items {
if autoscalingStrategy . Status . Enabled {
autoscalingEnabled = true
break
}
}
// get list of all nodes
var nodeList corev1 . NodeList
if err := r . List ( ctx , & nodeList ) ; err != nil {
logr . Error ( err , "Unable to list nodes" )
return ctrl . Result { } , err
}
// get list of all pending nodes
var pendingNodeList updatev1alpha1 . PendingNodeList
if err := r . List ( ctx , & pendingNodeList , client . InNamespace ( req . Namespace ) ) ; err != nil {
logr . Error ( err , "Unable to list pending nodes" )
return ctrl . Result { } , err
}
// get list of all scaling groups
var scalingGroupList updatev1alpha1 . ScalingGroupList
if err := r . List ( ctx , & scalingGroupList , client . InNamespace ( req . Namespace ) ) ; err != nil {
logr . Error ( err , "Unable to list scaling groups" )
return ctrl . Result { } , err
}
scalingGroupByID := make ( map [ string ] updatev1alpha1 . ScalingGroup , len ( scalingGroupList . Items ) )
for _ , scalingGroup := range scalingGroupList . Items {
2022-08-05 06:17:53 -04:00
scalingGroupByID [ strings . ToLower ( scalingGroup . Spec . GroupID ) ] = scalingGroup
2022-06-29 08:48:40 -04:00
}
annotatedNodes , invalidNodes := r . annotateNodes ( ctx , nodeList . Items )
2023-01-03 06:09:53 -05:00
groups := groupNodes ( annotatedNodes , pendingNodeList . Items , desiredNodeVersion . Spec . ImageReference , desiredNodeVersion . Spec . KubernetesComponentsReference )
2022-06-29 08:48:40 -04:00
logr . Info ( "Grouped nodes" ,
"outdatedNodes" , len ( groups . Outdated ) ,
"upToDateNodes" , len ( groups . UpToDate ) ,
"donorNodes" , len ( groups . Donors ) ,
"heirNodes" , len ( groups . Heirs ) ,
"mintNodes" , len ( groups . Mint ) ,
"pendingNodes" , len ( pendingNodeList . Items ) ,
"obsoleteNodes" , len ( groups . Obsolete ) ,
"invalidNodes" , len ( invalidNodes ) )
// extraNodes are nodes that exist in the scaling group which cannot be used for regular workloads.
// consists of nodes that are
// - being created (joining)
// - being destroyed (leaving)
// - heirs to outdated nodes
2023-01-06 06:08:25 -05:00
extraNodes := len ( groups . Heirs ) + len ( groups . AwaitingAnnotation ) + len ( pendingNodeList . Items )
2022-06-29 08:48:40 -04:00
// newNodesBudget is the maximum number of new nodes that can be created in this Reconcile call.
var newNodesBudget int
if extraNodes < nodeOverprovisionLimit {
newNodesBudget = nodeOverprovisionLimit - extraNodes
}
logr . Info ( "Budget for new nodes" , "newNodesBudget" , newNodesBudget )
2023-01-03 06:09:53 -05:00
status := nodeVersionStatus ( r . Scheme , groups , pendingNodeList . Items , invalidNodes , newNodesBudget )
2022-06-29 08:48:40 -04:00
if err := r . tryUpdateStatus ( ctx , req . NamespacedName , status ) ; err != nil {
logr . Error ( err , "Updating status" )
}
2023-01-06 06:08:25 -05:00
allNodesUpToDate := len ( groups . Outdated ) + len ( groups . Heirs ) + len ( groups . AwaitingAnnotation ) + len ( pendingNodeList . Items ) + len ( groups . Obsolete ) == 0
2022-06-29 08:48:40 -04:00
if err := r . ensureAutoscaling ( ctx , autoscalingEnabled , allNodesUpToDate ) ; err != nil {
logr . Error ( err , "Ensure autoscaling" , "autoscalingEnabledIs" , autoscalingEnabled , "autoscalingEnabledWant" , allNodesUpToDate )
return ctrl . Result { } , err
}
if allNodesUpToDate {
2023-01-03 06:09:53 -05:00
logr . Info ( "All node versions up to date" )
2022-06-29 08:48:40 -04:00
return ctrl . Result { } , nil
}
2022-09-06 11:36:08 -04:00
// should requeue is set if a node is deleted
var shouldRequeue bool
2022-06-29 08:48:40 -04:00
// find pairs of mint nodes and outdated nodes in the same scaling group to become donor & heir
2023-01-03 06:09:53 -05:00
replacementPairs := r . pairDonorsAndHeirs ( ctx , & desiredNodeVersion , groups . Outdated , groups . Mint )
2022-06-29 08:48:40 -04:00
// extend replacement pairs to include existing pairs of donors and heirs
replacementPairs = r . matchDonorsAndHeirs ( ctx , replacementPairs , groups . Donors , groups . Heirs )
// replace donor nodes by heirs
for _ , pair := range replacementPairs {
logr . Info ( "Replacing node" , "donorNode" , pair . donor . Name , "heirNode" , pair . heir . Name )
2023-01-03 06:09:53 -05:00
done , err := r . replaceNode ( ctx , & desiredNodeVersion , pair )
2022-09-06 11:36:08 -04:00
if err != nil {
2022-06-29 08:48:40 -04:00
logr . Error ( err , "Replacing node" )
return ctrl . Result { } , err
}
2022-09-06 11:36:08 -04:00
if done {
shouldRequeue = true
// remove donor annotation from heir
if err := r . patchUnsetNodeAnnotations ( ctx , pair . heir . Name , [ ] string { donorAnnotation } ) ; err != nil {
logr . Error ( err , "Unable to remove donor annotation from heir" , "heirNode" , pair . heir . Name )
}
}
2022-06-29 08:48:40 -04:00
}
2022-06-27 04:58:09 -04:00
2022-06-29 08:48:40 -04:00
// only create new nodes if the autoscaler is disabled.
// otherwise, new nodes will also be created by the autoscaler
if autoscalingEnabled {
2022-09-06 11:36:08 -04:00
return ctrl . Result { Requeue : shouldRequeue } , nil
2022-06-29 08:48:40 -04:00
}
2023-01-03 06:09:53 -05:00
newNodeConfig := newNodeConfig { desiredNodeVersion , groups . Outdated , pendingNodeList . Items , scalingGroupByID , newNodesBudget }
2022-11-21 12:01:23 -05:00
if err := r . createNewNodes ( ctx , newNodeConfig ) ; err != nil {
2022-09-06 11:36:08 -04:00
return ctrl . Result { Requeue : shouldRequeue } , nil
2022-06-29 08:48:40 -04:00
}
// cleanup obsolete nodes
for _ , node := range groups . Obsolete {
2023-01-03 06:09:53 -05:00
done , err := r . deleteNode ( ctx , & desiredNodeVersion , node )
2022-09-06 11:36:08 -04:00
if err != nil {
2022-06-29 08:48:40 -04:00
logr . Error ( err , "Unable to remove obsolete node" )
}
2022-09-06 11:36:08 -04:00
if done {
shouldRequeue = true
}
2022-06-29 08:48:40 -04:00
}
2022-06-27 04:58:09 -04:00
2022-09-06 11:36:08 -04:00
return ctrl . Result { Requeue : shouldRequeue } , nil
2022-06-27 04:58:09 -04:00
}
// SetupWithManager sets up the controller with the Manager.
2023-01-03 06:09:53 -05:00
func ( r * NodeVersionReconciler ) SetupWithManager ( mgr ctrl . Manager ) error {
2022-06-27 04:58:09 -04:00
return ctrl . NewControllerManagedBy ( mgr ) .
2023-01-03 06:09:53 -05:00
For ( & updatev1alpha1 . NodeVersion { } ) .
2022-06-29 08:48:40 -04:00
Watches (
& source . Kind { Type : & updatev1alpha1 . ScalingGroup { } } ,
handler . EnqueueRequestsFromMapFunc ( r . findObjectsForScalingGroup ) ,
builder . WithPredicates ( scalingGroupImageChangedPredicate ( ) ) ,
) .
Watches (
& source . Kind { Type : & updatev1alpha1 . AutoscalingStrategy { } } ,
2023-01-03 06:09:53 -05:00
handler . EnqueueRequestsFromMapFunc ( r . findAllNodeVersions ) ,
2022-06-29 08:48:40 -04:00
builder . WithPredicates ( autoscalerEnabledStatusChangedPredicate ( ) ) ,
) .
Watches (
& source . Kind { Type : & corev1 . Node { } } ,
2023-01-03 06:09:53 -05:00
handler . EnqueueRequestsFromMapFunc ( r . findAllNodeVersions ) ,
2022-06-29 08:48:40 -04:00
builder . WithPredicates ( nodeReadyPredicate ( ) ) ,
) .
Watches (
& source . Kind { Type : & nodemaintenancev1beta1 . NodeMaintenance { } } ,
2023-01-03 06:09:53 -05:00
handler . EnqueueRequestsFromMapFunc ( r . findAllNodeVersions ) ,
2022-06-29 08:48:40 -04:00
builder . WithPredicates ( nodeMaintenanceSucceededPredicate ( ) ) ,
) .
2023-01-06 06:08:25 -05:00
Watches (
& source . Kind { Type : & updatev1alpha1 . JoiningNode { } } ,
handler . EnqueueRequestsFromMapFunc ( r . findAllNodeVersions ) ,
builder . WithPredicates ( joiningNodeDeletedPredicate ( ) ) ,
) .
2022-06-29 08:48:40 -04:00
Owns ( & updatev1alpha1 . PendingNode { } ) .
2022-06-27 04:58:09 -04:00
Complete ( r )
}
2022-06-29 08:48:40 -04:00
// annotateNodes takes all nodes of the cluster and annotates them with the scaling group they are in and the image they are using.
2023-01-03 06:09:53 -05:00
func ( r * NodeVersionReconciler ) annotateNodes ( ctx context . Context , nodes [ ] corev1 . Node ) ( annotatedNodes , invalidNodes [ ] corev1 . Node ) {
2022-06-29 08:48:40 -04:00
logr := log . FromContext ( ctx )
for _ , node := range nodes {
annotations := make ( map [ string ] string )
if node . Spec . ProviderID == "" {
logr . Info ( "Node is missing providerID" , "invalidNode" , node . Name )
invalidNodes = append ( invalidNodes , node )
continue
}
if _ , ok := node . Annotations [ scalingGroupAnnotation ] ; ! ok {
scalingGroupID , err := r . nodeReplacer . GetScalingGroupID ( ctx , node . Spec . ProviderID )
if err != nil {
logr . Error ( err , "Unable to get node scaling group" )
invalidNodes = append ( invalidNodes , node )
continue
}
annotations [ scalingGroupAnnotation ] = scalingGroupID
}
if _ , ok := node . Annotations [ nodeImageAnnotation ] ; ! ok {
nodeImage , err := r . nodeReplacer . GetNodeImage ( ctx , node . Spec . ProviderID )
if err != nil {
logr . Error ( err , "Unable to get node image" )
invalidNodes = append ( invalidNodes , node )
continue
}
annotations [ nodeImageAnnotation ] = nodeImage
}
if len ( annotations ) > 0 {
if err := r . patchNodeAnnotations ( ctx , node . Name , annotations ) ; err != nil {
logr . Error ( err , "Unable to patch node annotations" )
invalidNodes = append ( invalidNodes , node )
continue
}
if err := r . Get ( ctx , types . NamespacedName { Name : node . Name } , & node ) ; err != nil {
logr . Error ( err , "Unable to get patched node" )
invalidNodes = append ( invalidNodes , node )
continue
}
}
annotatedNodes = append ( annotatedNodes , node )
}
return annotatedNodes , invalidNodes
}
2023-01-06 06:08:25 -05:00
func ( r * NodeVersionReconciler ) tryStartClusterVersionUpgrade ( ctx context . Context , nodeVersionName types . NamespacedName ) {
// try to set the cluster version upgrade status to "in progress"
// lock the node version for cluster upgrades
if err := retry . RetryOnConflict ( retry . DefaultRetry , func ( ) error {
nodeVersion := & updatev1alpha1 . NodeVersion { }
if err := r . Get ( ctx , nodeVersionName , nodeVersion ) ; err != nil {
log . FromContext ( ctx ) . Error ( err , "Unable to get node version" )
return err
}
if nodeVersion . Status . ActiveClusterVersionUpgrade {
return errors . New ( "cluster version upgrade already in progress" )
}
nodeVersion . Status . ActiveClusterVersionUpgrade = true
if err := r . Status ( ) . Update ( ctx , nodeVersion ) ; err != nil {
log . FromContext ( ctx ) . Error ( err , "Unable to update node version status" )
return err
}
return nil
} ) ; err != nil {
return
}
// get clusterKubernetesVersion from nodeVersion
nodeVersion := & updatev1alpha1 . NodeVersion { }
if err := r . Get ( ctx , nodeVersionName , nodeVersion ) ; err != nil {
log . FromContext ( ctx ) . Error ( err , "Unable to get node version" )
return
}
// get components configmap
componentsConfigMap := & corev1 . ConfigMap { }
if err := r . Get ( ctx , types . NamespacedName { Name : nodeVersion . Spec . KubernetesComponentsReference , Namespace : "kube-system" } , componentsConfigMap ) ; err != nil {
log . FromContext ( ctx ) . Error ( err , "Unable to get components configmap" )
return
}
// unmarshal components from configmap
componentsRaw := componentsConfigMap . Data [ mainconstants . ComponentsListKey ]
var clusterComponents components . Components
if err := json . Unmarshal ( [ ] byte ( componentsRaw ) , & clusterComponents ) ; err != nil {
log . FromContext ( ctx ) . Error ( err , "Unable to unmarshal components" )
return
}
log . FromContext ( ctx ) . Info ( "Starting cluster upgrade" , "clusterVersion" , nodeVersion . Spec . KubernetesClusterVersion )
kubeadm , err := clusterComponents . GetKubeadmComponent ( )
if err != nil {
log . FromContext ( ctx ) . Error ( err , "Unable to get kubeadm component" )
return
}
// talk to the upgrade-agent to start the upgrade
if err := r . Upgrade ( ctx , kubeadm . URL , kubeadm . Hash , nodeVersion . Spec . KubernetesClusterVersion ) ; err != nil {
log . FromContext ( ctx ) . Error ( err , "Unable to upgrade cluster" )
return
}
// set the cluster version upgrade status to "completed"
if err := retry . RetryOnConflict ( retry . DefaultRetry , func ( ) error {
nodeVersion := & updatev1alpha1 . NodeVersion { }
if err := r . Get ( ctx , nodeVersionName , nodeVersion ) ; err != nil {
log . FromContext ( ctx ) . Error ( err , "Unable to get node version" )
return err
}
nodeVersion . Status . ActiveClusterVersionUpgrade = false
if err := r . Status ( ) . Update ( ctx , nodeVersion ) ; err != nil {
log . FromContext ( ctx ) . Error ( err , "Unable to update node version status" )
return err
}
return nil
} ) ; err != nil {
log . FromContext ( ctx ) . Error ( err , "Unable to set cluster version upgrade status to completed" )
return
}
}
// pairDonorsAndHeirs takes a list of outdated nodes (that do not yet have a heir node) and a list of mint nodes (nodes using the latest image) and pairs matching nodes to become donor and heir.
// outdatedNodes is also updated with heir annotations.
// The heir gets a donor annotation pointing at the outdated node, and the donor gets an heir
// annotation pointing at the mint node; the mint node's PendingNode resource is deleted.
// Mint nodes with no matching outdated node are marked obsolete and deleted.
func (r *NodeVersionReconciler) pairDonorsAndHeirs(ctx context.Context, controller metav1.Object, outdatedNodes []corev1.Node, mintNodes []mintNode) []replacementPair {
	logr := log.FromContext(ctx)
	var pairs []replacementPair
	for _, mintNode := range mintNodes {
		var foundReplacement bool
		// find outdated node in the same group
		for i := range outdatedNodes {
			// take a pointer so the heir annotation written below sticks on the caller's slice
			outdatedNode := &outdatedNodes[i]
			// skip nodes in other scaling groups (IDs compared case-insensitively) and nodes that already have an heir
			if !strings.EqualFold(outdatedNode.Annotations[scalingGroupAnnotation], mintNode.pendingNode.Spec.ScalingGroupID) || len(outdatedNode.Annotations[heirAnnotation]) != 0 {
				continue
			}
			// mark as donor <-> heir pair and delete "pending node" resource
			if err := r.patchNodeAnnotations(ctx, mintNode.node.Name, map[string]string{donorAnnotation: outdatedNode.Name}); err != nil {
				logr.Error(err, "Unable to update mint node donor annotation", "mintNode", mintNode.node.Name)
				break
			}
			if mintNode.node.Annotations == nil {
				mintNode.node.Annotations = make(map[string]string)
			}
			mintNode.node.Annotations[donorAnnotation] = outdatedNode.Name
			if err := r.patchNodeAnnotations(ctx, outdatedNode.Name, map[string]string{heirAnnotation: mintNode.node.Name}); err != nil {
				logr.Error(err, "Unable to update outdated node heir annotation", "outdatedNode", outdatedNode.Name)
				break
			}
			outdatedNode.Annotations[heirAnnotation] = mintNode.node.Name
			if err := r.Delete(ctx, &mintNode.pendingNode); err != nil {
				logr.Error(err, "Unable to delete pending node resource", "pendingNode", mintNode.pendingNode.Name)
				break
			}
			pairs = append(pairs, replacementPair{
				donor: *outdatedNode,
				heir:  mintNode.node,
			})
			logr.Info("New matched up pair", "donorNode", outdatedNode.Name, "heirNode", mintNode.node.Name)
			foundReplacement = true
			break
		}
		// NOTE(review): a `break` above after a patch/delete failure also lands here with
		// foundReplacement == false, so a partially annotated mint node is then marked obsolete.
		if !foundReplacement {
			logr.Info("No replacement found for mint node. Marking as outdated.", "mintNode", mintNode.node.Name, "scalingGroupID", mintNode.pendingNode.Spec.ScalingGroupID)
			// mint node was not needed as heir. Cleanup obsolete resources.
			if err := r.Delete(ctx, &mintNode.pendingNode); err != nil {
				logr.Error(err, "Unable to delete pending node resource", "pendingNode", mintNode.pendingNode.Name)
				break
			}
			if err := r.patchNodeAnnotations(ctx, mintNode.node.Name, map[string]string{obsoleteAnnotation: "true"}); err != nil {
				logr.Error(err, "Unable to update mint node obsolete annotation", "mintNode", mintNode.node.Name)
				break
			}
			// NOTE(review): the breaks in this cleanup branch exit the outer mintNodes loop,
			// so one cleanup failure stops processing all remaining mint nodes — confirm intended.
			if _, err := r.deleteNode(ctx, controller, mintNode.node); err != nil {
				logr.Error(err, "Unable to delete obsolete node", "obsoleteNode", mintNode.node.Name)
				break
			}
		}
	}
	return pairs
}
// matchDonorsAndHeirs takes separate lists of donors and heirs and matches each heir to its previously chosen donor.
// a list of replacement pairs is returned.
// donors and heirs with invalid pair references are cleaned up (the donor/heir annotations gets removed).
2023-01-03 06:09:53 -05:00
func ( r * NodeVersionReconciler ) matchDonorsAndHeirs ( ctx context . Context , pairs [ ] replacementPair , donors , heirs [ ] corev1 . Node ) [ ] replacementPair {
2022-06-29 08:48:40 -04:00
logr := log . FromContext ( ctx )
for _ , heir := range heirs {
var foundPair bool
for _ , donor := range donors {
if heir . Annotations [ donorAnnotation ] == donor . Name {
pairs = append ( pairs , replacementPair {
donor : donor ,
heir : heir ,
} )
foundPair = true
break
}
}
if ! foundPair {
// remove donor annotation from heir
if err := r . patchUnsetNodeAnnotations ( ctx , heir . Name , [ ] string { donorAnnotation } ) ; err != nil {
logr . Error ( err , "Unable to remove donor annotation from heir" , "heirNode" , heir . Name )
}
delete ( heir . Annotations , donorAnnotation )
}
}
// iterate over all donors and remove donor annotation from nodes that are not in a pair
// (cleanup)
for _ , donor := range donors {
var foundPair bool
for _ , pair := range pairs {
if pair . donor . Name == donor . Name {
foundPair = true
break
}
}
if ! foundPair {
// remove heir annotation from donor
if err := r . patchUnsetNodeAnnotations ( ctx , donor . Name , [ ] string { heirAnnotation } ) ; err != nil {
logr . Error ( err , "Unable to remove heir annotation from donor" , "donorNode" , donor . Name )
}
delete ( donor . Annotations , heirAnnotation )
}
}
return pairs
}
// ensureAutoscaling will ensure that the autoscaling is enabled or disabled as needed.
2023-01-03 06:09:53 -05:00
func ( r * NodeVersionReconciler ) ensureAutoscaling ( ctx context . Context , autoscalingEnabled bool , wantAutoscalingEnabled bool ) error {
2022-06-29 08:48:40 -04:00
if autoscalingEnabled == wantAutoscalingEnabled {
return nil
}
var autoscalingStrategiesList updatev1alpha1 . AutoscalingStrategyList
if err := r . List ( ctx , & autoscalingStrategiesList ) ; err != nil {
return err
}
for i := range autoscalingStrategiesList . Items {
if err := retry . RetryOnConflict ( retry . DefaultRetry , func ( ) error {
var autoscalingStrategy updatev1alpha1 . AutoscalingStrategy
if err := r . Get ( ctx , types . NamespacedName { Name : autoscalingStrategiesList . Items [ i ] . Name } , & autoscalingStrategy ) ; err != nil {
return err
}
autoscalingStrategy . Spec . Enabled = wantAutoscalingEnabled
return r . Client . Update ( ctx , & autoscalingStrategy )
} ) ; err != nil {
return err
}
}
return nil
}
// replaceNode take a donor and a heir node and then replaces the donor node by the heir node.
//
// Replacing nodes involves the following steps:
// Labels are copied from the donor node to the heir node.
// Readiness of the heir node is awaited.
// Deletion of the donor node is scheduled.
2023-01-03 06:09:53 -05:00
func ( r * NodeVersionReconciler ) replaceNode ( ctx context . Context , controller metav1 . Object , pair replacementPair ) ( bool , error ) {
2022-06-29 08:48:40 -04:00
logr := log . FromContext ( ctx )
if ! reflect . DeepEqual ( nodeutil . FilterLabels ( pair . donor . Labels ) , nodeutil . FilterLabels ( pair . heir . Labels ) ) {
if err := r . copyNodeLabels ( ctx , pair . donor . Name , pair . heir . Name ) ; err != nil {
logr . Error ( err , "Copy node labels" )
2022-09-06 11:36:08 -04:00
return false , err
2022-06-29 08:48:40 -04:00
}
}
heirReady := nodeutil . Ready ( & pair . heir )
if ! heirReady {
2022-09-06 11:36:08 -04:00
return false , nil
2022-06-29 08:48:40 -04:00
}
2022-09-06 11:36:08 -04:00
return r . deleteNode ( ctx , controller , pair . donor )
2022-06-29 08:48:40 -04:00
}
// deleteNode safely removes a node from the cluster and issues termination of the node by the CSP.
// It is called repeatedly across reconciliations and advances one step per call:
// create a NodeMaintenance resource, wait for cordon & drain to succeed, remove the etcd
// member (control-plane nodes only), delete the Kubernetes node object, and finally schedule
// CSP termination tracked by a PendingNode resource.
// The returned bool is true once the node deletion has been fully scheduled.
func (r *NodeVersionReconciler) deleteNode(ctx context.Context, controller metav1.Object, node corev1.Node) (bool, error) {
	logr := log.FromContext(ctx)
	// cordon & drain node using node-maintenance-operator
	var foundNodeMaintenance nodemaintenancev1beta1.NodeMaintenance
	err := r.Get(ctx, types.NamespacedName{Name: node.Name}, &foundNodeMaintenance)
	if client.IgnoreNotFound(err) != nil {
		// unexpected error occurred
		return false, err
	}
	if err != nil {
		// NodeMaintenance resource does not exist yet
		nodeMaintenance := nodemaintenancev1beta1.NodeMaintenance{
			ObjectMeta: metav1.ObjectMeta{
				Name: node.Name,
			},
			Spec: nodemaintenancev1beta1.NodeMaintenanceSpec{
				NodeName: node.Name,
				Reason:   "node is replaced due to OS image update",
			},
		}
		return false, r.Create(ctx, &nodeMaintenance)
	}
	// NodeMaintenance resource already exists. Check cordon & drain status.
	if foundNodeMaintenance.Status.Phase != nodemaintenancev1beta1.MaintenanceSucceeded {
		logr.Info("Cordon & drain in progress", "maintenanceNode", node.Name, "nodeMaintenanceStatus", foundNodeMaintenance.Status.Phase)
		return false, nil
	}
	// node is unused & ready to be replaced
	if nodeutil.IsControlPlaneNode(&node) {
		// the etcd member must be removed before the node goes away to keep the cluster healthy
		nodeVPCIP, err := nodeutil.VPCIP(&node)
		if err != nil {
			logr.Error(err, "Unable to get node VPC IP")
			return false, err
		}
		if err := r.RemoveEtcdMemberFromCluster(ctx, nodeVPCIP); err != nil {
			logr.Error(err, "Unable to remove etcd member from cluster")
			return false, err
		}
	}
	if err := r.Delete(ctx, &node); err != nil {
		logr.Error(err, "Deleting node")
		return false, err
	}
	logr.Info("Deleted node", "deletedNode", node.Name)
	// schedule deletion of the node with the CSP
	// best-effort: the error is logged but not returned, the PendingNode below still tracks the node
	if err := r.DeleteNode(ctx, node.Spec.ProviderID); err != nil {
		logr.Error(err, "Scheduling CSP node deletion", "providerID", node.Spec.ProviderID)
	}
	// track the leaving node as a PendingNode with a deadline by which it must be gone
	deadline := metav1.NewTime(time.Now().Add(nodeLeaveTimeout))
	pendingNode := updatev1alpha1.PendingNode{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: controller.GetNamespace(),
			Name:      node.Name,
		},
		Spec: updatev1alpha1.PendingNodeSpec{
			ProviderID:     node.Spec.ProviderID,
			ScalingGroupID: node.Annotations[scalingGroupAnnotation],
			NodeName:       node.Name,
			Goal:           updatev1alpha1.NodeGoalLeave,
			Deadline:       &deadline,
		},
	}
	// owned by the controller object so the PendingNode is garbage-collected with it
	if err := ctrl.SetControllerReference(controller, &pendingNode, r.Scheme); err != nil {
		return false, err
	}
	// best-effort as well: failure to create the tracking resource is logged, not returned
	if err := r.Create(ctx, &pendingNode); err != nil {
		logr.Error(err, "Tracking CSP node deletion")
	}
	return true, nil
}
// createNewNodes creates new nodes using up to date images as replacement for outdated nodes.
// It never creates more nodes than the remaining newNodesBudget allows.
func (r *NodeVersionReconciler) createNewNodes(ctx context.Context, config newNodeConfig) error {
	logr := log.FromContext(ctx)
	// nothing to do when the budget is exhausted or no node is outdated
	if config.newNodesBudget < 1 || len(config.outdatedNodes) == 0 {
		return nil
	}
	// count outdated nodes per scaling group (keys lowercased for case-insensitive matching)
	outdatedNodesPerScalingGroup := make(map[string]int)
	for _, node := range config.outdatedNodes {
		// skip outdated nodes that got assigned an heir in this Reconcile call
		if len(node.Annotations[heirAnnotation]) != 0 {
			continue
		}
		outdatedNodesPerScalingGroup[strings.ToLower(node.Annotations[scalingGroupAnnotation])]++
	}
	// count nodes that are already on their way to joining, per scaling group
	pendingJoiningNodesPerScalingGroup := make(map[string]int)
	for _, pendingNode := range config.pendingNodes {
		// skip pending nodes that are not joining
		if pendingNode.Spec.Goal != updatev1alpha1.NodeGoalJoin {
			continue
		}
		pendingJoiningNodesPerScalingGroup[strings.ToLower(pendingNode.Spec.ScalingGroupID)]++
	}
	// required = outdated minus already-joining, per scaling group
	requiredNodesPerScalingGroup := make(map[string]int, len(outdatedNodesPerScalingGroup))
	for scalingGroupID := range outdatedNodesPerScalingGroup {
		scalingGroupID := strings.ToLower(scalingGroupID)
		if pendingJoiningNodesPerScalingGroup[scalingGroupID] < outdatedNodesPerScalingGroup[scalingGroupID] {
			requiredNodesPerScalingGroup[scalingGroupID] = outdatedNodesPerScalingGroup[scalingGroupID] - pendingJoiningNodesPerScalingGroup[scalingGroupID]
		}
	}
	for scalingGroupID := range requiredNodesPerScalingGroup {
		scalingGroup, ok := config.scalingGroupByID[scalingGroupID]
		if !ok {
			logr.Info("Scaling group does not have matching resource", "scalingGroup", scalingGroupID, "scalingGroups", config.scalingGroupByID)
			continue
		}
		// only create nodes in scaling groups that already use the desired image
		if !strings.EqualFold(scalingGroup.Status.ImageReference, config.desiredNodeVersion.Spec.ImageReference) {
			logr.Info("Scaling group does not use latest image", "scalingGroup", scalingGroupID, "usedImage", scalingGroup.Status.ImageReference, "wantedImage", config.desiredNodeVersion.Spec.ImageReference)
			continue
		}
		if requiredNodesPerScalingGroup[scalingGroupID] == 0 {
			continue
		}
		for {
			// stop creating nodes altogether once the global budget is used up
			if config.newNodesBudget == 0 {
				return nil
			}
			if requiredNodesPerScalingGroup[scalingGroupID] == 0 {
				break
			}
			logr.Info("Creating new node", "scalingGroup", scalingGroupID)
			nodeName, providerID, err := r.CreateNode(ctx, scalingGroup.Spec.GroupID)
			if err != nil {
				return err
			}
			// track the new node as pending with a join deadline
			deadline := metav1.NewTime(time.Now().Add(nodeJoinTimeout))
			pendingNode := &updatev1alpha1.PendingNode{
				ObjectMeta: metav1.ObjectMeta{Name: nodeName},
				Spec: updatev1alpha1.PendingNodeSpec{
					ProviderID:     providerID,
					ScalingGroupID: scalingGroup.Spec.GroupID,
					NodeName:       nodeName,
					Goal:           updatev1alpha1.NodeGoalJoin,
					Deadline:       &deadline,
				},
			}
			if err := ctrl.SetControllerReference(&config.desiredNodeVersion, pendingNode, r.Scheme); err != nil {
				return err
			}
			if err := r.Create(ctx, pendingNode); err != nil {
				return err
			}
			logr.Info("Created new node", "createdNode", nodeName, "scalingGroup", scalingGroupID)
			requiredNodesPerScalingGroup[scalingGroupID]--
			// config is a by-value copy; decrementing here only affects this call's budget tracking
			config.newNodesBudget--
		}
	}
	return nil
}
// patchNodeAnnotations attempts to patch node annotations in a retry loop.
2023-01-03 06:09:53 -05:00
func ( r * NodeVersionReconciler ) patchNodeAnnotations ( ctx context . Context , nodeName string , annotations map [ string ] string ) error {
2022-06-29 08:48:40 -04:00
return retry . RetryOnConflict ( retry . DefaultRetry , func ( ) error {
var node corev1 . Node
if err := r . Get ( ctx , types . NamespacedName { Name : nodeName } , & node ) ; err != nil {
return err
}
patchedNode := node . DeepCopy ( )
patch := patch . SetAnnotations ( & node , patchedNode , annotations )
return r . Client . Patch ( ctx , patchedNode , patch )
} )
}
// patchUnsetNodeAnnotations attempts to remove node annotations using a patch in a retry loop.
func (r *NodeVersionReconciler) patchUnsetNodeAnnotations(ctx context.Context, nodeName string, annotationKeys []string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// re-read the node on every attempt so conflicts can be resolved
		var node corev1.Node
		if err := r.Get(ctx, types.NamespacedName{Name: nodeName}, &node); err != nil {
			return err
		}
		patchedNode := node.DeepCopy()
		patch := patch.UnsetAnnotations(&node, patchedNode, annotationKeys)
		return r.Client.Patch(ctx, patchedNode, patch)
	})
}
// copyNodeLabels attempts to copy all node labels (except for reserved labels) from one node to another in a retry loop.
2023-01-03 06:09:53 -05:00
func ( r * NodeVersionReconciler ) copyNodeLabels ( ctx context . Context , oldNodeName , newNodeName string ) error {
2022-06-29 08:48:40 -04:00
return retry . RetryOnConflict ( retry . DefaultRetry , func ( ) error {
var oldNode corev1 . Node
if err := r . Get ( ctx , types . NamespacedName { Name : oldNodeName } , & oldNode ) ; err != nil {
return err
}
var newNode corev1 . Node
if err := r . Get ( ctx , types . NamespacedName { Name : newNodeName } , & newNode ) ; err != nil {
return err
}
patchedNode := newNode . DeepCopy ( )
patch := patch . SetLabels ( & newNode , patchedNode , nodeutil . FilterLabels ( oldNode . GetLabels ( ) ) )
return r . Client . Patch ( ctx , patchedNode , patch )
} )
}
// tryUpdateStatus attempts to update the NodeVersion status field in a retry loop.
func (r *NodeVersionReconciler) tryUpdateStatus(ctx context.Context, name types.NamespacedName, status updatev1alpha1.NodeVersionStatus) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// re-fetch the latest resource version on every attempt to resolve conflicts
		var nodeVersion updatev1alpha1.NodeVersion
		if err := r.Get(ctx, name, &nodeVersion); err != nil {
			return err
		}
		// deep copy so the caller's status value is not aliased by the API object
		nodeVersion.Status = *status.DeepCopy()
		return r.Status().Update(ctx, &nodeVersion)
	})
}
2023-01-03 06:09:53 -05:00
// nodeVersionStatus generates the NodeVersion.Status field given node groups and the budget for new nodes.
func nodeVersionStatus ( scheme * runtime . Scheme , groups nodeGroups , pendingNodes [ ] updatev1alpha1 . PendingNode , invalidNodes [ ] corev1 . Node , newNodesBudget int ) updatev1alpha1 . NodeVersionStatus {
var status updatev1alpha1 . NodeVersionStatus
2022-06-29 08:48:40 -04:00
outdatedCondition := metav1 . Condition {
Type : updatev1alpha1 . ConditionOutdated ,
}
if len ( groups . Outdated ) + len ( groups . Heirs ) + len ( pendingNodes ) + len ( groups . Obsolete ) == 0 {
outdatedCondition . Status = metav1 . ConditionFalse
2023-01-03 06:09:53 -05:00
outdatedCondition . Reason = conditionNodeVersionUpToDateReason
outdatedCondition . Message = conditionNodeVersionUpToDateMessage
2022-06-29 08:48:40 -04:00
} else {
outdatedCondition . Status = metav1 . ConditionTrue
2023-01-03 06:09:53 -05:00
outdatedCondition . Reason = conditionNodeVersionOutOfDateReason
outdatedCondition . Message = conditionNodeVersionOutOfDateMessage
2022-06-29 08:48:40 -04:00
}
meta . SetStatusCondition ( & status . Conditions , outdatedCondition )
for _ , node := range groups . Outdated {
nodeRef , err := ref . GetReference ( scheme , & node )
if err != nil {
continue
}
status . Outdated = append ( status . Outdated , * nodeRef )
}
for _ , node := range groups . UpToDate {
nodeRef , err := ref . GetReference ( scheme , & node )
if err != nil {
continue
}
status . UpToDate = append ( status . UpToDate , * nodeRef )
}
for _ , node := range groups . Donors {
nodeRef , err := ref . GetReference ( scheme , & node )
if err != nil {
continue
}
status . Donors = append ( status . Donors , * nodeRef )
}
for _ , node := range groups . Heirs {
nodeRef , err := ref . GetReference ( scheme , & node )
if err != nil {
continue
}
status . Heirs = append ( status . Heirs , * nodeRef )
}
2023-01-06 06:08:25 -05:00
for _ , node := range groups . AwaitingAnnotation {
nodeRef , err := ref . GetReference ( scheme , & node )
if err != nil {
continue
}
status . AwaitingAnnotation = append ( status . Heirs , * nodeRef )
}
2022-06-29 08:48:40 -04:00
for _ , node := range groups . Obsolete {
nodeRef , err := ref . GetReference ( scheme , & node )
if err != nil {
continue
}
status . Obsolete = append ( status . Obsolete , * nodeRef )
}
for _ , node := range invalidNodes {
nodeRef , err := ref . GetReference ( scheme , & node )
if err != nil {
continue
}
status . Invalid = append ( status . Invalid , * nodeRef )
}
for _ , mintN := range groups . Mint {
nodeRef , err := ref . GetReference ( scheme , & mintN . node )
if err != nil {
continue
}
status . Mints = append ( status . Mints , * nodeRef )
}
for _ , pending := range pendingNodes {
pendingRef , err := ref . GetReference ( scheme , & pending )
if err != nil {
continue
}
status . Pending = append ( status . Pending , * pendingRef )
}
status . Budget = uint32 ( newNodesBudget )
return status
}
// mintNode is a pair of a freshly joined kubernetes node
// and the corresponding (left over) pending node resource.
type mintNode struct {
	// node is the joined kubernetes node object.
	node corev1.Node
	// pendingNode is the PendingNode resource that tracked the node's join.
	pendingNode updatev1alpha1.PendingNode
}
// replacementPair is a pair of a donor (outdated node that should be replaced)
// and a heir (up to date node that inherits node labels).
type replacementPair struct {
	// donor is the outdated node being replaced.
	donor corev1.Node
	// heir is the up to date node that takes over from the donor.
	heir corev1.Node
}
// nodeGroups is a collection of disjoint sets of nodes.
// Every properly annotated kubernetes node can be placed in exactly one of the sets.
type nodeGroups struct {
	// Outdated nodes are nodes that
	// do not use the most recent version AND
	// are not yet a donor to an up to date heir node.
	Outdated,
	// UpToDate nodes are nodes that
	// use the most recent version,
	// are not an heir to an outdated donor node AND
	// are not mint nodes.
	UpToDate,
	// Donors are nodes that
	// do not use the most recent version AND
	// are paired up with an up to date heir node.
	Donors,
	// Heirs are nodes that
	// use the most recent version AND
	// are paired up with an outdated donor node.
	Heirs,
	// AwaitingAnnotation nodes are nodes that
	// are missing annotations.
	AwaitingAnnotation,
	// Obsolete nodes are nodes that
	// were created by the operator as replacements (heirs)
	// but could not get paired up with a donor node.
	// They will be cleaned up by the operator.
	Obsolete []corev1.Node
	// Mint nodes are nodes that
	// use the most recent version AND
	// were created by the operator as replacements (heirs)
	// and are awaiting pairing up with a donor node.
	Mint []mintNode
}
// groupNodes classifies nodes by placing each into exactly one group.
2023-01-03 06:09:53 -05:00
func groupNodes ( nodes [ ] corev1 . Node , pendingNodes [ ] updatev1alpha1 . PendingNode , latestImageReference string , latestK8sComponentsReference string ) nodeGroups {
2022-06-29 08:48:40 -04:00
groups := nodeGroups { }
for _ , node := range nodes {
if node . Annotations [ obsoleteAnnotation ] == "true" {
groups . Obsolete = append ( groups . Obsolete , node )
continue
}
2023-01-06 06:08:25 -05:00
if node . Annotations [ nodeImageAnnotation ] == "" || node . Annotations [ mainconstants . NodeKubernetesComponentsAnnotationKey ] == "" {
groups . AwaitingAnnotation = append ( groups . AwaitingAnnotation , node )
continue
}
2023-01-03 06:09:53 -05:00
if ! strings . EqualFold ( node . Annotations [ nodeImageAnnotation ] , latestImageReference ) ||
2023-01-06 14:48:03 -05:00
! strings . EqualFold ( node . Annotations [ mainconstants . NodeKubernetesComponentsAnnotationKey ] , latestK8sComponentsReference ) {
2022-06-29 08:48:40 -04:00
if heir := node . Annotations [ heirAnnotation ] ; heir != "" {
groups . Donors = append ( groups . Donors , node )
} else {
groups . Outdated = append ( groups . Outdated , node )
}
continue
}
2023-01-06 06:08:25 -05:00
if node . Annotations [ donorAnnotation ] != "" {
2022-06-29 08:48:40 -04:00
groups . Heirs = append ( groups . Heirs , node )
continue
}
if pendingNode := nodeutil . FindPending ( pendingNodes , & node ) ; pendingNode != nil {
groups . Mint = append ( groups . Mint , mintNode {
node : node ,
pendingNode : * pendingNode ,
} )
continue
}
groups . UpToDate = append ( groups . UpToDate , node )
}
return groups
}
// nodeReplacer provides methods to create and delete nodes at the CSP
// and to query node image and scaling group metadata.
type nodeReplacer interface {
	// GetNodeImage retrieves the image currently used by a node.
	GetNodeImage(ctx context.Context, providerID string) (string, error)
	// GetScalingGroupID retrieves the scaling group that a node is part of.
	GetScalingGroupID(ctx context.Context, providerID string) (string, error)
	// CreateNode creates a new node inside a specified scaling group at the CSP and returns its future name and provider id.
	CreateNode(ctx context.Context, scalingGroupID string) (nodeName, providerID string, err error)
	// DeleteNode starts the termination of the node at the CSP.
	DeleteNode(ctx context.Context, providerID string) error
}
// etcdRemover can remove members from an etcd cluster.
type etcdRemover interface {
	// RemoveEtcdMemberFromCluster removes an etcd member from the cluster.
	RemoveEtcdMemberFromCluster(ctx context.Context, vpcIP string) error
}
// clusterUpgrader can upgrade the cluster to a new Kubernetes version.
type clusterUpgrader interface {
	// Upgrade upgrades the cluster to the specified version.
	// (Fixed: doc comment previously named a non-existent UpgradeCluster method,
	// and parameter names were capitalized contrary to Go naming conventions.
	// Interface parameter names are documentation only, so implementations are unaffected.)
	Upgrade(ctx context.Context, kubeadmURL, kubeadmHash, wantedKubernetesVersion string) error
}
// kubernetesServerVersionGetter returns the version of the Kubernetes API server.
type kubernetesServerVersionGetter interface {
	// ServerVersion retrieves the server's version information.
	ServerVersion() (*version.Info, error)
}
// newNodeConfig bundles the inputs createNewNodes needs to decide how many
// replacement nodes to create and in which scaling groups.
type newNodeConfig struct {
	// desiredNodeVersion is the NodeVersion resource describing the wanted image / version.
	desiredNodeVersion updatev1alpha1.NodeVersion
	// outdatedNodes are nodes that do not run the desired version yet.
	outdatedNodes []corev1.Node
	// pendingNodes are resources tracking nodes that are currently joining or leaving.
	pendingNodes []updatev1alpha1.PendingNode
	// scalingGroupByID maps scaling group IDs to their resources
	// (looked up with lowercased IDs by createNewNodes).
	scalingGroupByID map[string]updatev1alpha1.ScalingGroup
	// newNodesBudget is the maximum number of nodes that may still be created.
	newNodesBudget int
}