upgrade: support Kubernetes components (#839)

* upgrade: add Kubernetes components to NodeVersion

* update rfc
This commit is contained in:
3u13r 2023-01-03 12:09:53 +01:00 committed by GitHub
parent 4b43311fbd
commit f14af0c3eb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
56 changed files with 897 additions and 738 deletions

View File

@ -170,9 +170,15 @@ func (k *KubeWrapper) InitCluster(
return nil, fmt.Errorf("waiting for Kubernetes API to be available: %w", err)
}
// Setup the K8s components ConfigMap.
k8sComponentsConfigMap, err := k.setupK8sComponentsConfigMap(ctx, kubernetesComponents)
if err != nil {
return nil, fmt.Errorf("failed to setup k8s version ConfigMap: %w", err)
}
// Annotate Node with the hash of the installed components
if err := k.client.AnnotateNode(ctx, nodeName,
constants.NodeKubernetesComponentsHashAnnotationKey, kubernetesComponents.GetHash(),
constants.NodeKubernetesComponentsHashAnnotationKey, k8sComponentsConfigMap,
); err != nil {
return nil, fmt.Errorf("annotating node with Kubernetes components hash: %w", err)
}
@ -247,12 +253,6 @@ func (k *KubeWrapper) InitCluster(
return nil, fmt.Errorf("installing operators: %w", err)
}
// Store the received k8sVersion in a ConfigMap, overwriting existing values (there shouldn't be any).
// Joining nodes determine the kubernetes version they will install based on this ConfigMap.
if err := k.setupK8sVersionConfigMap(ctx, k8sVersion, kubernetesComponents); err != nil {
return nil, fmt.Errorf("failed to setup k8s version ConfigMap: %w", err)
}
k.clusterUtil.FixCilium(log)
return k.GetKubeconfig()
@ -321,14 +321,15 @@ func (k *KubeWrapper) GetKubeconfig() ([]byte, error) {
return k.kubeconfigReader.ReadKubeconfig()
}
// setupK8sVersionConfigMap applies a ConfigMap (cf. server-side apply) to consistently store the installed k8s version.
func (k *KubeWrapper) setupK8sVersionConfigMap(ctx context.Context, k8sVersion versions.ValidK8sVersion, components versions.ComponentVersions) error {
// setupK8sComponentsConfigMap applies a ConfigMap (cf. server-side apply) to store the installed k8s components.
// It returns the name of the ConfigMap.
func (k *KubeWrapper) setupK8sComponentsConfigMap(ctx context.Context, components versions.ComponentVersions) (string, error) {
componentsMarshalled, err := json.Marshal(components)
if err != nil {
return fmt.Errorf("marshalling component versions: %w", err)
return "", fmt.Errorf("marshalling component versions: %w", err)
}
componentsHash := components.GetHash()
componentConfigMapName := fmt.Sprintf("k8s-component-%s", strings.ReplaceAll(componentsHash, ":", "-"))
componentConfigMapName := fmt.Sprintf("k8s-components-%s", strings.ReplaceAll(componentsHash, ":", "-"))
componentsConfig := corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
@ -346,29 +347,10 @@ func (k *KubeWrapper) setupK8sVersionConfigMap(ctx context.Context, k8sVersion v
}
if err := k.client.CreateConfigMap(ctx, componentsConfig); err != nil {
return fmt.Errorf("apply in KubeWrapper.setupK8sVersionConfigMap(..) for components config map failed with: %w", err)
return "", fmt.Errorf("apply in KubeWrapper.setupK8sVersionConfigMap(..) for components config map failed with: %w", err)
}
config := corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Name: constants.K8sVersionConfigMapName,
Namespace: "kube-system",
},
Data: map[string]string{
constants.K8sVersionFieldName: string(k8sVersion),
constants.K8sComponentsFieldName: componentConfigMapName,
},
}
if err := k.client.CreateConfigMap(ctx, config); err != nil {
return fmt.Errorf("apply in KubeWrapper.setupK8sVersionConfigMap(..) for version config map failed with: %w", err)
}
return nil
return componentConfigMapName, nil
}
// setupInternalConfigMap applies a ConfigMap (cf. server-side apply) to store information that is not supposed to be user-editable.

View File

@ -37,3 +37,9 @@ rules:
- create
- update
- patch
- apiGroups:
- "update.edgeless.systems"
resources:
- nodeversions
verbs:
- get

View File

@ -58,8 +58,6 @@ spec:
sources:
- configMap:
name: {{ .Values.global.joinConfigCMName | quote }}
- configMap:
name: {{ .Values.global.k8sVersionCMName | quote }}
- configMap:
name: {{ .Values.global.internalCMName | quote }}
- name: kubeadm

View File

@ -5,8 +5,6 @@ global:
serviceBasePath: /var/config
# Name of the ConfigMap that holds measurements and other info.
joinConfigCMName: join-config
# Name of the ConfigMap that holds the installed k8s version.
k8sVersionCMName: k8s-version
# Name of the ConfigMap that holds configs that should not be modified by the user.
internalCMName: internal-config

View File

@ -34,9 +34,9 @@ spec:
description: JoiningNodeSpec defines the components hash which the node
should be annotated with.
properties:
componentshash:
description: ComponentsHash is the hash of the components that were
sent to the node by the join service.
componentsreference:
description: ComponentsReference is the reference to the ConfigMap
containing the components.
type: string
deadline:
description: Deadline is the time after which the joining node is

View File

@ -1,22 +1,22 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: nodeimages.update.edgeless.systems
name: nodeversions.update.edgeless.systems
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
spec:
group: update.edgeless.systems
names:
kind: NodeImage
listKind: NodeImageList
plural: nodeimages
singular: nodeimage
kind: NodeVersion
listKind: NodeVersionList
plural: nodeversions
singular: nodeversion
scope: Cluster
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: NodeImage is the Schema for the nodeimages API.
description: NodeVersion is the Schema for the nodeimages API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
@ -31,7 +31,7 @@ spec:
metadata:
type: object
spec:
description: NodeImageSpec defines the desired state of NodeImage.
description: NodeVersionSpec defines the desired state of NodeImage.
properties:
image:
description: ImageReference is the image to use for all nodes.
@ -40,9 +40,13 @@ spec:
description: ImageVersion is the CSP independent version of the image
to use for all nodes.
type: string
kubernetesComponentsReference:
description: KubernetesComponentsReference is a reference to the ConfigMap
containing the Kubernetes components to use for all nodes.
type: string
type: object
status:
description: NodeImageStatus defines the observed state of NodeImage.
description: NodeVersionStatus defines the observed state of NodeImage.
properties:
budget:
description: Budget is the amount of extra nodes that can be created
@ -56,8 +60,8 @@ spec:
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
\n type FooStatus struct{ // Represents the observations of a foo's
current state. // Known .status.conditions.type are: \"Available\",
\n type FooStatus struct{ // Represents the observations of a
foo's current state. // Known .status.conditions.type are: \"Available\",
\"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
// +listType=map // +listMapKey=type Conditions []metav1.Condition
`json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
@ -71,8 +75,8 @@ spec:
format: date-time
type: string
message:
description: message is a human readable message indicating details
about the transition. This may be an empty string.
description: message is a human readable message indicating
details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
@ -86,11 +90,11 @@ spec:
type: integer
reason:
description: reason contains a programmatic identifier indicating
the reason for the condition's last transition. Producers of
specific condition types may define expected values and meanings
for this field, and whether the values are considered a guaranteed
API. The value should be a CamelCase string. This field may
not be empty.
the reason for the condition's last transition. Producers
of specific condition types may define expected values and
meanings for this field, and whether the values are considered
a guaranteed API. The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
@ -120,32 +124,33 @@ spec:
type: object
type: array
donors:
description: Donors is a list of outdated nodes that donate labels to
heirs.
description: Donors is a list of outdated nodes that donate labels
to heirs.
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
description: "ObjectReference contains enough information to let
you inspect or modify the referred object. --- New uses of this
type are discouraged because of difficulty describing its usage
when embedded in APIs. 1. Ignored fields. It includes many fields
which are not generally honored. For instance, ResourceVersion
and FieldPath are both very rarely valid in actual usage. 2. Invalid
usage help. It is impossible to add specific help for individual
usage. In most embedded usages, there are particular restrictions
like, \"must refer only to types A and B\" or \"UID not honored\"
or \"name must be restricted\". Those cannot be well described
when embedded. 3. Inconsistent validation. Because the usages
are different, the validation rules are different by usage, which
makes it hard for users to predict what will happen. 4. The fields
are both imprecise and overly precise. Kind is not a precise
mapping to a URL. This can produce ambiguity during interpretation
and require a REST mapping. In most cases, the dependency is
on the group,resource tuple and the version of the actual struct
is irrelevant. 5. We cannot easily change it. Because this type
is embedded in many locations, updates to this type will affect
numerous schemas. Don't make new APIs embed an underspecified
API type they do not control. \n Instead of using this type, create
a locally provided and used type that is well-focused on your
reference. For example, ServiceReferences for admission registration:
https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
@ -183,32 +188,33 @@ spec:
type: object
type: array
heirs:
description: Heirs is a list of nodes using the latest image that still
need to inherit labels from donors.
description: Heirs is a list of nodes using the latest image that
still need to inherit labels from donors.
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
description: "ObjectReference contains enough information to let
you inspect or modify the referred object. --- New uses of this
type are discouraged because of difficulty describing its usage
when embedded in APIs. 1. Ignored fields. It includes many fields
which are not generally honored. For instance, ResourceVersion
and FieldPath are both very rarely valid in actual usage. 2. Invalid
usage help. It is impossible to add specific help for individual
usage. In most embedded usages, there are particular restrictions
like, \"must refer only to types A and B\" or \"UID not honored\"
or \"name must be restricted\". Those cannot be well described
when embedded. 3. Inconsistent validation. Because the usages
are different, the validation rules are different by usage, which
makes it hard for users to predict what will happen. 4. The fields
are both imprecise and overly precise. Kind is not a precise
mapping to a URL. This can produce ambiguity during interpretation
and require a REST mapping. In most cases, the dependency is
on the group,resource tuple and the version of the actual struct
is irrelevant. 5. We cannot easily change it. Because this type
is embedded in many locations, updates to this type will affect
numerous schemas. Don't make new APIs embed an underspecified
API type they do not control. \n Instead of using this type, create
a locally provided and used type that is well-focused on your
reference. For example, ServiceReferences for admission registration:
https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
@ -246,33 +252,34 @@ spec:
type: object
type: array
invalid:
description: Invalid is a list of invalid nodes (nodes that cannot be
processed by the operator due to missing information or transient
description: Invalid is a list of invalid nodes (nodes that cannot
be processed by the operator due to missing information or transient
faults).
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
description: "ObjectReference contains enough information to let
you inspect or modify the referred object. --- New uses of this
type are discouraged because of difficulty describing its usage
when embedded in APIs. 1. Ignored fields. It includes many fields
which are not generally honored. For instance, ResourceVersion
and FieldPath are both very rarely valid in actual usage. 2. Invalid
usage help. It is impossible to add specific help for individual
usage. In most embedded usages, there are particular restrictions
like, \"must refer only to types A and B\" or \"UID not honored\"
or \"name must be restricted\". Those cannot be well described
when embedded. 3. Inconsistent validation. Because the usages
are different, the validation rules are different by usage, which
makes it hard for users to predict what will happen. 4. The fields
are both imprecise and overly precise. Kind is not a precise
mapping to a URL. This can produce ambiguity during interpretation
and require a REST mapping. In most cases, the dependency is
on the group,resource tuple and the version of the actual struct
is irrelevant. 5. We cannot easily change it. Because this type
is embedded in many locations, updates to this type will affect
numerous schemas. Don't make new APIs embed an underspecified
API type they do not control. \n Instead of using this type, create
a locally provided and used type that is well-focused on your
reference. For example, ServiceReferences for admission registration:
https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
@ -310,31 +317,33 @@ spec:
type: object
type: array
mints:
description: Mints is a list of up to date nodes that will become heirs.
description: Mints is a list of up to date nodes that will become
heirs.
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
description: "ObjectReference contains enough information to let
you inspect or modify the referred object. --- New uses of this
type are discouraged because of difficulty describing its usage
when embedded in APIs. 1. Ignored fields. It includes many fields
which are not generally honored. For instance, ResourceVersion
and FieldPath are both very rarely valid in actual usage. 2. Invalid
usage help. It is impossible to add specific help for individual
usage. In most embedded usages, there are particular restrictions
like, \"must refer only to types A and B\" or \"UID not honored\"
or \"name must be restricted\". Those cannot be well described
when embedded. 3. Inconsistent validation. Because the usages
are different, the validation rules are different by usage, which
makes it hard for users to predict what will happen. 4. The fields
are both imprecise and overly precise. Kind is not a precise
mapping to a URL. This can produce ambiguity during interpretation
and require a REST mapping. In most cases, the dependency is
on the group,resource tuple and the version of the actual struct
is irrelevant. 5. We cannot easily change it. Because this type
is embedded in many locations, updates to this type will affect
numerous schemas. Don't make new APIs embed an underspecified
API type they do not control. \n Instead of using this type, create
a locally provided and used type that is well-focused on your
reference. For example, ServiceReferences for admission registration:
https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
@ -372,32 +381,33 @@ spec:
type: object
type: array
obsolete:
description: Obsolete is a list of obsolete nodes (nodes that have been
created by the operator but are no longer needed).
description: Obsolete is a list of obsolete nodes (nodes that have
been created by the operator but are no longer needed).
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
description: "ObjectReference contains enough information to let
you inspect or modify the referred object. --- New uses of this
type are discouraged because of difficulty describing its usage
when embedded in APIs. 1. Ignored fields. It includes many fields
which are not generally honored. For instance, ResourceVersion
and FieldPath are both very rarely valid in actual usage. 2. Invalid
usage help. It is impossible to add specific help for individual
usage. In most embedded usages, there are particular restrictions
like, \"must refer only to types A and B\" or \"UID not honored\"
or \"name must be restricted\". Those cannot be well described
when embedded. 3. Inconsistent validation. Because the usages
are different, the validation rules are different by usage, which
makes it hard for users to predict what will happen. 4. The fields
are both imprecise and overly precise. Kind is not a precise
mapping to a URL. This can produce ambiguity during interpretation
and require a REST mapping. In most cases, the dependency is
on the group,resource tuple and the version of the actual struct
is irrelevant. 5. We cannot easily change it. Because this type
is embedded in many locations, updates to this type will affect
numerous schemas. Don't make new APIs embed an underspecified
API type they do not control. \n Instead of using this type, create
a locally provided and used type that is well-focused on your
reference. For example, ServiceReferences for admission registration:
https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
@ -438,29 +448,30 @@ spec:
description: Outdated is a list of nodes that are using an outdated
image.
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
description: "ObjectReference contains enough information to let
you inspect or modify the referred object. --- New uses of this
type are discouraged because of difficulty describing its usage
when embedded in APIs. 1. Ignored fields. It includes many fields
which are not generally honored. For instance, ResourceVersion
and FieldPath are both very rarely valid in actual usage. 2. Invalid
usage help. It is impossible to add specific help for individual
usage. In most embedded usages, there are particular restrictions
like, \"must refer only to types A and B\" or \"UID not honored\"
or \"name must be restricted\". Those cannot be well described
when embedded. 3. Inconsistent validation. Because the usages
are different, the validation rules are different by usage, which
makes it hard for users to predict what will happen. 4. The fields
are both imprecise and overly precise. Kind is not a precise
mapping to a URL. This can produce ambiguity during interpretation
and require a REST mapping. In most cases, the dependency is
on the group,resource tuple and the version of the actual struct
is irrelevant. 5. We cannot easily change it. Because this type
is embedded in many locations, updates to this type will affect
numerous schemas. Don't make new APIs embed an underspecified
API type they do not control. \n Instead of using this type, create
a locally provided and used type that is well-focused on your
reference. For example, ServiceReferences for admission registration:
https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
@ -501,29 +512,30 @@ spec:
description: Pending is a list of pending nodes (joining or leaving
the cluster).
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
description: "ObjectReference contains enough information to let
you inspect or modify the referred object. --- New uses of this
type are discouraged because of difficulty describing its usage
when embedded in APIs. 1. Ignored fields. It includes many fields
which are not generally honored. For instance, ResourceVersion
and FieldPath are both very rarely valid in actual usage. 2. Invalid
usage help. It is impossible to add specific help for individual
usage. In most embedded usages, there are particular restrictions
like, \"must refer only to types A and B\" or \"UID not honored\"
or \"name must be restricted\". Those cannot be well described
when embedded. 3. Inconsistent validation. Because the usages
are different, the validation rules are different by usage, which
makes it hard for users to predict what will happen. 4. The fields
are both imprecise and overly precise. Kind is not a precise
mapping to a URL. This can produce ambiguity during interpretation
and require a REST mapping. In most cases, the dependency is
on the group,resource tuple and the version of the actual struct
is irrelevant. 5. We cannot easily change it. Because this type
is embedded in many locations, updates to this type will affect
numerous schemas. Don't make new APIs embed an underspecified
API type they do not control. \n Instead of using this type, create
a locally provided and used type that is well-focused on your
reference. For example, ServiceReferences for admission registration:
https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
@ -561,32 +573,33 @@ spec:
type: object
type: array
upToDate:
description: UpToDate is a list of nodes that are using the latest image
and labels.
description: UpToDate is a list of nodes that are using the latest
image and labels.
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
description: "ObjectReference contains enough information to let
you inspect or modify the referred object. --- New uses of this
type are discouraged because of difficulty describing its usage
when embedded in APIs. 1. Ignored fields. It includes many fields
which are not generally honored. For instance, ResourceVersion
and FieldPath are both very rarely valid in actual usage. 2. Invalid
usage help. It is impossible to add specific help for individual
usage. In most embedded usages, there are particular restrictions
like, \"must refer only to types A and B\" or \"UID not honored\"
or \"name must be restricted\". Those cannot be well described
when embedded. 3. Inconsistent validation. Because the usages
are different, the validation rules are different by usage, which
makes it hard for users to predict what will happen. 4. The fields
are both imprecise and overly precise. Kind is not a precise
mapping to a URL. This can produce ambiguity during interpretation
and require a REST mapping. In most cases, the dependency is
on the group,resource tuple and the version of the actual struct
is irrelevant. 5. We cannot easily change it. Because this type
is embedded in many locations, updates to this type will affect
numerous schemas. Don't make new APIs embed an underspecified
API type they do not control. \n Instead of using this type, create
a locally provided and used type that is well-focused on your
reference. For example, ServiceReferences for admission registration:
https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
@ -632,9 +645,3 @@ spec:
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

View File

@ -102,7 +102,7 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimage
- nodeversion
verbs:
- get
- list
@ -110,7 +110,13 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages
- nodeversion/status
verbs:
- get
- apiGroups:
- update.edgeless.systems
resources:
- nodeversions
verbs:
- create
- delete
@ -122,13 +128,13 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/finalizers
- nodeversions/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/status
- nodeversions/status
verbs:
- get
- patch

View File

@ -362,7 +362,6 @@ func (i *ChartLoader) loadConstellationServicesValues(config *config.Config, mas
"kmsPort": constants.KMSPort,
"serviceBasePath": constants.ServiceBasePath,
"joinConfigCMName": constants.JoinConfigMap,
"k8sVersionCMName": constants.K8sVersionConfigMapName,
"internalCMName": constants.InternalConfigMap,
},
"kms": map[string]any{

View File

@ -105,7 +105,7 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimage
- nodeversion
verbs:
- get
- list
@ -113,7 +113,13 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages
- nodeversion/status
verbs:
- get
- apiGroups:
- update.edgeless.systems
resources:
- nodeversions
verbs:
- create
- delete
@ -125,13 +131,13 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/finalizers
- nodeversions/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/status
- nodeversions/status
verbs:
- get
- patch

View File

@ -36,4 +36,10 @@ rules:
- get
- create
- update
- patch
- patch
- apiGroups:
- "update.edgeless.systems"
resources:
- nodeversions
verbs:
- get

View File

@ -58,8 +58,6 @@ spec:
sources:
- configMap:
name: join-config
- configMap:
name: k8s-version
- configMap:
name: internal-config
- name: kubeadm

View File

@ -105,7 +105,7 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimage
- nodeversion
verbs:
- get
- list
@ -113,7 +113,13 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages
- nodeversion/status
verbs:
- get
- apiGroups:
- update.edgeless.systems
resources:
- nodeversions
verbs:
- create
- delete
@ -125,13 +131,13 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/finalizers
- nodeversions/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/status
- nodeversions/status
verbs:
- get
- patch

View File

@ -37,3 +37,9 @@ rules:
- create
- update
- patch
- apiGroups:
- "update.edgeless.systems"
resources:
- nodeversions
verbs:
- get

View File

@ -58,8 +58,6 @@ spec:
sources:
- configMap:
name: join-config
- configMap:
name: k8s-version
- configMap:
name: internal-config
- name: kubeadm

View File

@ -105,7 +105,7 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimage
- nodeversion
verbs:
- get
- list
@ -113,7 +113,13 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages
- nodeversion/status
verbs:
- get
- apiGroups:
- update.edgeless.systems
resources:
- nodeversions
verbs:
- create
- delete
@ -125,13 +131,13 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/finalizers
- nodeversions/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/status
- nodeversions/status
verbs:
- get
- patch

View File

@ -36,4 +36,10 @@ rules:
- get
- create
- update
- patch
- patch
- apiGroups:
- "update.edgeless.systems"
resources:
- nodeversions
verbs:
- get

View File

@ -58,8 +58,6 @@ spec:
sources:
- configMap:
name: join-config
- configMap:
name: k8s-version
- configMap:
name: internal-config
- name: kubeadm

View File

@ -113,20 +113,13 @@ const (
// AzureCVM is the name of the file indicating whether the cluster is expected to run on CVMs or not.
AzureCVM = "azureCVM"
// K8sVersionConfigMapName is the filename of the mapped "k8s-version" configMap file.
K8sVersionConfigMapName = "k8s-version"
// K8sVersionFieldName is the key in the "k8s-version" configMap which references the string with the K8s version.
K8sVersionFieldName = "k8s-version"
// K8sComponentsFieldName is the name of the key holding the configMap name that holds the components configuration.
K8sComponentsFieldName = "components"
// ComponentsListKey is the name of the key holding the list of components in the components configMap.
ComponentsListKey = "components"
// NodeKubernetesComponentsHashAnnotationKey is the name of the annotation holding the hash of the installed components of this node.
NodeKubernetesComponentsHashAnnotationKey = "updates.edgeless.systems/kubernetes-components-hash"
// NodeKubernetesComponentsHashAnnotationKey is the name of the annotation holding the reference to the ConfigMap listing all K8s components.
NodeKubernetesComponentsHashAnnotationKey = "constellation.edgeless.systems/kubernetes-components"
// JoiningNodesConfigMapName is the name of the configMap holding the joining nodes with the components hashes the node-operator should annotate the nodes with.
JoiningNodesConfigMapName = "joining-nodes"

View File

@ -87,7 +87,6 @@ func main() {
server, err := server.New(
measurementSalt,
handler,
kubernetesca.New(log.Named("certificateAuthority"), handler),
kubeadm,
kms,

View File

@ -73,8 +73,26 @@ func (c *Client) getConfigMapData(ctx context.Context, name, key string) (string
return cm.Data[key], nil
}
// GetK8sComponentsRefFromNodeVersionCRD returns the K8sComponentsRef from the node version CRD.
func (c *Client) GetK8sComponentsRefFromNodeVersionCRD(ctx context.Context, nodeName string) (string, error) {
	// The NodeVersion CR is cluster-scoped and served by the update.edgeless.systems group.
	gvr := schema.GroupVersionResource{
		Group:    "update.edgeless.systems",
		Version:  "v1alpha1",
		Resource: "nodeversions",
	}
	cr, err := c.dynClient.Resource(gvr).Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("failed to get node version: %w", err)
	}
	// Pull spec.kubernetesComponentsReference out of the unstructured object.
	ref, found, err := unstructured.NestedString(cr.Object, "spec", "kubernetesComponentsReference")
	if err != nil {
		return "", fmt.Errorf("failed to get K8sComponentsRef from node version: %w", err)
	}
	if !found {
		return "", fmt.Errorf("kubernetesComponentsReference not found in node version")
	}
	return ref, nil
}
// AddNodeToJoiningNodes adds the provided node as a joining node CRD.
func (c *Client) AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string, isControlPlane bool) error {
func (c *Client) AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsReference string, isControlPlane bool) error {
joiningNode := &unstructured.Unstructured{}
compliantNodeName, err := k8sCompliantHostname(nodeName)
@ -98,10 +116,10 @@ func (c *Client) AddNodeToJoiningNodes(ctx context.Context, nodeName string, com
"name": objectMetadataName,
},
"spec": map[string]any{
"name": compliantNodeName,
"componentshash": componentsHash,
"iscontrolplane": isControlPlane,
"deadline": deadline,
"name": compliantNodeName,
"componentsreference": componentsReference,
"iscontrolplane": isControlPlane,
"deadline": deadline,
},
})
if isControlPlane {

View File

@ -10,13 +10,11 @@ import (
"context"
"fmt"
"net"
"path/filepath"
"time"
"github.com/edgelesssys/constellation/v2/internal/attestation"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/crypto"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/grpc/grpclog"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/versions"
@ -36,7 +34,6 @@ type Server struct {
measurementSalt []byte
log *logger.Logger
file file.Handler
joinTokenGetter joinTokenGetter
dataKeyGetter dataKeyGetter
ca certificateAuthority
@ -46,7 +43,7 @@ type Server struct {
// New initializes a new Server.
func New(
measurementSalt []byte, fileHandler file.Handler, ca certificateAuthority,
measurementSalt []byte, ca certificateAuthority,
joinTokenGetter joinTokenGetter, dataKeyGetter dataKeyGetter, log *logger.Logger,
) (*Server, error) {
kubeClient, err := kubernetes.New()
@ -56,7 +53,6 @@ func New(
return &Server{
measurementSalt: measurementSalt,
log: log,
file: fileHandler,
joinTokenGetter: joinTokenGetter,
dataKeyGetter: dataKeyGetter,
ca: ca,
@ -114,14 +110,8 @@ func (s *Server) IssueJoinTicket(ctx context.Context, req *joinproto.IssueJoinTi
return nil, status.Errorf(codes.Internal, "unable to generate Kubernetes join arguments: %s", err)
}
log.Infof("Querying K8sVersion ConfigMap for Kubernetes version")
k8sVersion, err := s.getK8sVersion()
if err != nil {
return nil, status.Errorf(codes.Internal, "unable to get k8s version: %s", err)
}
log.Infof("Querying K8sVersion ConfigMap for components ConfigMap name")
componentsConfigMapName, err := s.getK8sComponentsConfigMapName()
log.Infof("Querying NodeVersion CR for components ConfigMap name")
componentsConfigMapName, err := s.getK8sComponentsConfigMapName(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "unable to get components ConfigMap name: %s", err)
}
@ -160,7 +150,7 @@ func (s *Server) IssueJoinTicket(ctx context.Context, req *joinproto.IssueJoinTi
return nil, status.Errorf(codes.Internal, "unable to get node name from CSR: %s", err)
}
if err := s.kubeClient.AddNodeToJoiningNodes(ctx, nodeName, components.GetHash(), req.IsControlPlane); err != nil {
if err := s.kubeClient.AddNodeToJoiningNodes(ctx, nodeName, componentsConfigMapName, req.IsControlPlane); err != nil {
return nil, status.Errorf(codes.Internal, "unable to add node to joining nodes: %s", err)
}
@ -174,7 +164,6 @@ func (s *Server) IssueJoinTicket(ctx context.Context, req *joinproto.IssueJoinTi
DiscoveryTokenCaCertHash: kubeArgs.CACertHashes[0],
KubeletCert: kubeletCert,
ControlPlaneFiles: controlPlaneFiles,
KubernetesVersion: k8sVersion,
KubernetesComponents: components.ToJoinProto(),
}, nil
}
@ -204,26 +193,13 @@ func (s *Server) IssueRejoinTicket(ctx context.Context, req *joinproto.IssueRejo
}, nil
}
// getK8sVersion reads the k8s version from a VolumeMount that is backed by the k8s-version ConfigMap.
func (s *Server) getK8sVersion() (string, error) {
fileContent, err := s.file.Read(filepath.Join(constants.ServiceBasePath, constants.K8sVersionConfigMapName))
if err != nil {
return "", fmt.Errorf("could not read k8s version file: %w", err)
}
k8sVersion := string(fileContent)
return k8sVersion, nil
}
// getK8sComponentsConfigMapName reads the k8s components config map name from a VolumeMount that is backed by the k8s-version ConfigMap.
func (s *Server) getK8sComponentsConfigMapName() (string, error) {
fileContent, err := s.file.Read(filepath.Join(constants.ServiceBasePath, constants.K8sComponentsFieldName))
func (s *Server) getK8sComponentsConfigMapName(ctx context.Context) (string, error) {
k8sComponentsRef, err := s.kubeClient.GetK8sComponentsRefFromNodeVersionCRD(ctx, "constellation-version")
if err != nil {
return "", fmt.Errorf("could not read k8s version file: %w", err)
return "", fmt.Errorf("could not get k8s components config map name: %w", err)
}
componentsConfigMapName := string(fileContent)
return componentsConfigMapName, nil
return k8sComponentsRef, nil
}
// joinTokenGetter returns Kubernetes bootstrap (join) tokens.
@ -247,6 +223,7 @@ type certificateAuthority interface {
}
type kubeClient interface {
GetK8sComponentsRefFromNodeVersionCRD(ctx context.Context, nodeName string) (string, error)
GetComponents(ctx context.Context, configMapName string) (versions.ComponentVersions, error)
AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string, isControlPlane bool) error
}

View File

@ -9,17 +9,13 @@ package server
import (
"context"
"errors"
"path/filepath"
"testing"
"time"
"github.com/edgelesssys/constellation/v2/internal/attestation"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/versions"
"github.com/edgelesssys/constellation/v2/joinservice/joinproto"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
@ -42,7 +38,6 @@ func TestIssueJoinTicket(t *testing.T) {
CACertHashes: []string{"hash"},
Token: "token",
}
testK8sVersion := versions.Default
components := versions.ComponentVersions{
{
@ -69,18 +64,7 @@ func TestIssueJoinTicket(t *testing.T) {
attestation.MeasurementSecretContext: measurementSecret,
}},
ca: stubCA{cert: testCert, nodeName: "node"},
kubeClient: stubKubeClient{getComponentsVal: components},
},
"worker node components reference missing": {
kubeadm: stubTokenGetter{token: testJoinToken},
kms: stubKeyGetter{dataKeys: map[string][]byte{
uuid: testKey,
attestation.MeasurementSecretContext: measurementSecret,
}},
ca: stubCA{cert: testCert, nodeName: "node"},
kubeClient: stubKubeClient{getComponentsVal: components},
missingComponentsReferenceFile: true,
wantErr: true,
kubeClient: stubKubeClient{getComponentsVal: components, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"},
},
"kubeclient fails": {
kubeadm: stubTokenGetter{token: testJoinToken},
@ -99,7 +83,7 @@ func TestIssueJoinTicket(t *testing.T) {
attestation.MeasurementSecretContext: measurementSecret,
}},
ca: stubCA{cert: testCert, nodeName: "node", getNameErr: someErr},
kubeClient: stubKubeClient{getComponentsVal: components},
kubeClient: stubKubeClient{getComponentsVal: components, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"},
wantErr: true,
},
"Cannot add node to JoiningNode CRD": {
@ -109,14 +93,14 @@ func TestIssueJoinTicket(t *testing.T) {
attestation.MeasurementSecretContext: measurementSecret,
}},
ca: stubCA{cert: testCert, nodeName: "node"},
kubeClient: stubKubeClient{getComponentsVal: components, addNodeToJoiningNodesErr: someErr},
kubeClient: stubKubeClient{getComponentsVal: components, addNodeToJoiningNodesErr: someErr, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"},
wantErr: true,
},
"GetDataKey fails": {
kubeadm: stubTokenGetter{token: testJoinToken},
kms: stubKeyGetter{dataKeys: make(map[string][]byte), getDataKeyErr: someErr},
ca: stubCA{cert: testCert, nodeName: "node"},
kubeClient: stubKubeClient{getComponentsVal: components},
kubeClient: stubKubeClient{getComponentsVal: components, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"},
wantErr: true,
},
"GetJoinToken fails": {
@ -126,7 +110,7 @@ func TestIssueJoinTicket(t *testing.T) {
attestation.MeasurementSecretContext: measurementSecret,
}},
ca: stubCA{cert: testCert, nodeName: "node"},
kubeClient: stubKubeClient{getComponentsVal: components},
kubeClient: stubKubeClient{getComponentsVal: components, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"},
wantErr: true,
},
"GetCertificate fails": {
@ -136,7 +120,7 @@ func TestIssueJoinTicket(t *testing.T) {
attestation.MeasurementSecretContext: measurementSecret,
}},
ca: stubCA{getCertErr: someErr, nodeName: "node"},
kubeClient: stubKubeClient{getComponentsVal: components},
kubeClient: stubKubeClient{getComponentsVal: components, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"},
wantErr: true,
},
"control plane": {
@ -150,7 +134,7 @@ func TestIssueJoinTicket(t *testing.T) {
attestation.MeasurementSecretContext: measurementSecret,
}},
ca: stubCA{cert: testCert, nodeName: "node"},
kubeClient: stubKubeClient{getComponentsVal: components},
kubeClient: stubKubeClient{getComponentsVal: components, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"},
},
"GetControlPlaneCertificateKey fails": {
isControlPlane: true,
@ -160,7 +144,7 @@ func TestIssueJoinTicket(t *testing.T) {
attestation.MeasurementSecretContext: measurementSecret,
}},
ca: stubCA{cert: testCert, nodeName: "node"},
kubeClient: stubKubeClient{getComponentsVal: components},
kubeClient: stubKubeClient{getComponentsVal: components, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"},
wantErr: true,
},
}
@ -170,19 +154,10 @@ func TestIssueJoinTicket(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
handler := file.NewHandler(afero.NewMemMapFs())
// IssueJoinTicket tries to read the k8s-version ConfigMap from a mounted file.
require.NoError(handler.Write(filepath.Join(constants.ServiceBasePath, constants.K8sVersionConfigMapName), []byte(testK8sVersion), file.OptNone))
if !tc.missingComponentsReferenceFile {
require.NoError(handler.Write(filepath.Join(constants.ServiceBasePath, constants.K8sComponentsFieldName), []byte(testK8sVersion), file.OptNone))
}
salt := []byte{0xA, 0xB, 0xC}
api := Server{
measurementSalt: salt,
file: handler,
ca: tc.ca,
joinTokenGetter: tc.kubeadm,
dataKeyGetter: tc.kms,
@ -210,7 +185,7 @@ func TestIssueJoinTicket(t *testing.T) {
assert.Equal(tc.ca.cert, resp.KubeletCert)
assert.Equal(tc.kubeClient.getComponentsVal.ToJoinProto(), resp.KubernetesComponents)
assert.Equal(tc.ca.nodeName, tc.kubeClient.joiningNodeName)
assert.Equal(tc.kubeClient.getComponentsVal.GetHash(), tc.kubeClient.componentsHash)
assert.Equal(tc.kubeClient.getK8sComponentsRefFromNodeVersionCRDVal, tc.kubeClient.componentsRef)
if tc.isControlPlane {
assert.Len(resp.ControlPlaneFiles, len(tc.kubeadm.files))
@ -249,7 +224,6 @@ func TestIssueRejoinTicker(t *testing.T) {
require := require.New(t)
api := Server{
file: file.Handler{},
ca: stubCA{},
joinTokenGetter: stubTokenGetter{},
dataKeyGetter: tc.keyGetter,
@ -315,17 +289,24 @@ type stubKubeClient struct {
getComponentsVal versions.ComponentVersions
getComponentsErr error
getK8sComponentsRefFromNodeVersionCRDErr error
getK8sComponentsRefFromNodeVersionCRDVal string
addNodeToJoiningNodesErr error
joiningNodeName string
componentsHash string
componentsRef string
}
// GetK8sComponentsRefFromNodeVersionCRD returns the preconfigured components
// reference and error; it ignores ctx and nodeName (test stub).
func (s *stubKubeClient) GetK8sComponentsRefFromNodeVersionCRD(ctx context.Context, nodeName string) (string, error) {
	return s.getK8sComponentsRefFromNodeVersionCRDVal, s.getK8sComponentsRefFromNodeVersionCRDErr
}
// GetComponents returns the preconfigured component versions and error; it
// ignores ctx and configMapName (test stub).
func (s *stubKubeClient) GetComponents(ctx context.Context, configMapName string) (versions.ComponentVersions, error) {
	return s.getComponentsVal, s.getComponentsErr
}
func (s *stubKubeClient) AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string, isControlPlane bool) error {
func (s *stubKubeClient) AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsRef string, isControlPlane bool) error {
s.joiningNodeName = nodeName
s.componentsHash = componentsHash
s.componentsRef = componentsRef
return s.addNodeToJoiningNodesErr
}

View File

@ -12,7 +12,7 @@ resources:
controller: true
domain: edgeless.systems
group: update
kind: NodeImage
kind: NodeVersion
path: github.com/edgelesssys/constellation/operators/constellation-node-operator/api/v1alpha1
version: v1alpha1
- api:

View File

@ -15,14 +15,14 @@ In particular, it is responsible for updating the OS images of nodes by replacin
The operator has multiple controllers with corresponding custom resource definitions (CRDs) that are responsible for the following high level tasks:
### NodeImage
### NodeVersion
`NodeImage` is the only user controlled CRD. The spec allows an administrator to update the desired image and trigger a rolling update.
`NodeVersion` is the only user controlled CRD. The spec allows an administrator to update the desired image and trigger a rolling update.
Example for GCP:
```yaml
apiVersion: update.edgeless.systems/v1alpha1
kind: NodeImage
kind: NodeVersion
metadata:
name: constellation-os
spec:
@ -32,7 +32,7 @@ spec:
Example for Azure:
```yaml
apiVersion: update.edgeless.systems/v1alpha1
kind: NodeImage
kind: NodeVersion
metadata:
name: constellation-os
spec:
@ -42,7 +42,7 @@ spec:
### AutoscalingStrategy
`AutoscalingStrategy` is used and modified by the `NodeImage` controller to pause the `cluster-autoscaler` while an image update is in progress.
`AutoscalingStrategy` is used and modified by the `NodeVersion` controller to pause the `cluster-autoscaler` while an image update is in progress.
Example:
@ -60,7 +60,7 @@ spec:
### ScalingGroup
`ScalingGroup` represents one scaling group at the CSP. Constellation uses one scaling group for worker nodes and one for control-plane nodes.
The scaling group controller will automatically set the image used for newly created nodes to be the image set in the `NodeImage` Spec. On cluster creation, one instance of the `ScalingGroup` resource per scaling group at the CSP is created. It does not need to be updated manually.
The scaling group controller will automatically set the image used for newly created nodes to be the image set in the `NodeVersion` Spec. On cluster creation, one instance of the `ScalingGroup` resource per scaling group at the CSP is created. It does not need to be updated manually.
Example for GCP:

View File

@ -14,8 +14,8 @@ import (
type JoiningNodeSpec struct {
// Name of the node expected to join.
Name string `json:"name,omitempty"`
// ComponentsHash is the hash of the components that were sent to the node by the join service.
ComponentsHash string `json:"componentshash,omitempty"`
// ComponentsReference is the reference to the ConfigMap containing the components.
ComponentsReference string `json:"componentsreference,omitempty"`
// IsControlPlane is true if the node is a control plane node.
IsControlPlane bool `json:"iscontrolplane,omitempty"`
// Deadline is the time after which the joining node is considered to have failed.

View File

@ -11,16 +11,18 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NodeImageSpec defines the desired state of NodeImage.
type NodeImageSpec struct {
// NodeVersionSpec defines the desired state of NodeVersion.
type NodeVersionSpec struct {
// ImageReference is the image to use for all nodes.
ImageReference string `json:"image,omitempty"`
// ImageVersion is the CSP independent version of the image to use for all nodes.
ImageVersion string `json:"imageVersion,omitempty"`
// KubernetesComponentsReference is a reference to the ConfigMap containing the Kubernetes components to use for all nodes.
KubernetesComponentsReference string `json:"kubernetesComponentsReference,omitempty"`
}
// NodeImageStatus defines the observed state of NodeImage.
type NodeImageStatus struct {
// NodeVersionStatus defines the observed state of NodeVersion.
type NodeVersionStatus struct {
// Outdated is a list of nodes that are using an outdated image.
Outdated []corev1.ObjectReference `json:"outdated,omitempty"`
// UpToDate is a list of nodes that are using the latest image and labels.
@ -47,24 +49,24 @@ type NodeImageStatus struct {
//+kubebuilder:subresource:status
//+kubebuilder:resource:scope=Cluster
// NodeImage is the Schema for the nodeimages API.
type NodeImage struct {
// NodeVersion is the Schema for the nodeversions API.
type NodeVersion struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec NodeImageSpec `json:"spec,omitempty"`
Status NodeImageStatus `json:"status,omitempty"`
Spec NodeVersionSpec `json:"spec,omitempty"`
Status NodeVersionStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// NodeImageList contains a list of NodeImage.
type NodeImageList struct {
// NodeVersionList contains a list of NodeVersion.
type NodeVersionList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []NodeImage `json:"items"`
Items []NodeVersion `json:"items"`
}
func init() {
SchemeBuilder.Register(&NodeImage{}, &NodeImageList{})
SchemeBuilder.Register(&NodeVersion{}, &NodeVersionList{})
}

View File

@ -22,8 +22,8 @@ const (
// ScalingGroupSpec defines the desired state of ScalingGroup.
type ScalingGroupSpec struct {
// NodeImage is the name of the NodeImage resource.
NodeImage string `json:"nodeImage,omitempty"`
// NodeVersion is the name of the NodeVersion resource.
NodeVersion string `json:"nodeImage,omitempty"`
// GroupID is the CSP specific, canonical identifier of a scaling group.
GroupID string `json:"groupId,omitempty"`
// AutoscalerGroupName is name that is expected by the autoscaler.

View File

@ -201,7 +201,7 @@ func (in *JoiningNodeStatus) DeepCopy() *JoiningNodeStatus {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeImage) DeepCopyInto(out *NodeImage) {
func (in *NodeVersion) DeepCopyInto(out *NodeVersion) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
@ -209,18 +209,18 @@ func (in *NodeImage) DeepCopyInto(out *NodeImage) {
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeImage.
func (in *NodeImage) DeepCopy() *NodeImage {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeVersion.
func (in *NodeVersion) DeepCopy() *NodeVersion {
if in == nil {
return nil
}
out := new(NodeImage)
out := new(NodeVersion)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeImage) DeepCopyObject() runtime.Object {
func (in *NodeVersion) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@ -228,31 +228,31 @@ func (in *NodeImage) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeImageList) DeepCopyInto(out *NodeImageList) {
func (in *NodeVersionList) DeepCopyInto(out *NodeVersionList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]NodeImage, len(*in))
*out = make([]NodeVersion, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeImageList.
func (in *NodeImageList) DeepCopy() *NodeImageList {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeVersionList.
func (in *NodeVersionList) DeepCopy() *NodeVersionList {
if in == nil {
return nil
}
out := new(NodeImageList)
out := new(NodeVersionList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeImageList) DeepCopyObject() runtime.Object {
func (in *NodeVersionList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@ -260,22 +260,22 @@ func (in *NodeImageList) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeImageSpec) DeepCopyInto(out *NodeImageSpec) {
func (in *NodeVersionSpec) DeepCopyInto(out *NodeVersionSpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeImageSpec.
func (in *NodeImageSpec) DeepCopy() *NodeImageSpec {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeVersionSpec.
func (in *NodeVersionSpec) DeepCopy() *NodeVersionSpec {
if in == nil {
return nil
}
out := new(NodeImageSpec)
out := new(NodeVersionSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeImageStatus) DeepCopyInto(out *NodeImageStatus) {
func (in *NodeVersionStatus) DeepCopyInto(out *NodeVersionStatus) {
*out = *in
if in.Outdated != nil {
in, out := &in.Outdated, &out.Outdated
@ -326,12 +326,12 @@ func (in *NodeImageStatus) DeepCopyInto(out *NodeImageStatus) {
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeImageStatus.
func (in *NodeImageStatus) DeepCopy() *NodeImageStatus {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeVersionStatus.
func (in *NodeVersionStatus) DeepCopy() *NodeVersionStatus {
if in == nil {
return nil
}
out := new(NodeImageStatus)
out := new(NodeVersionStatus)
in.DeepCopyInto(out)
return out
}

View File

@ -36,9 +36,9 @@ spec:
description: JoiningNodeSpec defines the components hash which the node
should be annotated with.
properties:
componentshash:
description: ComponentsHash is the hash of the components that were
sent to the node by the join service.
componentsreference:
description: ComponentsReference is the reference to the ConfigMap
containing the components.
type: string
deadline:
description: Deadline is the time after which the joining node is

View File

@ -5,20 +5,20 @@ metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
creationTimestamp: null
name: nodeimages.update.edgeless.systems
name: nodeversions.update.edgeless.systems
spec:
group: update.edgeless.systems
names:
kind: NodeImage
listKind: NodeImageList
plural: nodeimages
singular: nodeimage
kind: NodeVersion
listKind: NodeVersionList
plural: nodeversions
singular: nodeversion
scope: Cluster
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: NodeImage is the Schema for the nodeimages API.
description: NodeVersion is the Schema for the nodeversions API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
@ -33,7 +33,7 @@ spec:
metadata:
type: object
spec:
description: NodeImageSpec defines the desired state of NodeImage.
description: NodeVersionSpec defines the desired state of NodeVersion.
properties:
image:
description: ImageReference is the image to use for all nodes.
@ -42,9 +42,13 @@ spec:
description: ImageVersion is the CSP independent version of the image
to use for all nodes.
type: string
kubernetesComponentsReference:
description: KubernetesComponentsReference is a reference to the ConfigMap
containing the Kubernetes components to use for all nodes.
type: string
type: object
status:
description: NodeImageStatus defines the observed state of NodeImage.
description: NodeVersionStatus defines the observed state of NodeVersion.
properties:
budget:
description: Budget is the amount of extra nodes that can be created

View File

@ -57,7 +57,7 @@ spec:
format: int32
type: integer
nodeImage:
description: NodeImage is the name of the NodeImage resource.
description: NodeVersion is the name of the NodeVersion resource.
type: string
role:
description: Role is the role of the nodes in the scaling group.

View File

@ -2,7 +2,7 @@
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/update.edgeless.systems_nodeimages.yaml
- bases/update.edgeless.systems_nodeversions.yaml
- bases/update.edgeless.systems_joiningnodes.yaml
- bases/update.edgeless.systems_autoscalingstrategies.yaml
- bases/update.edgeless.systems_scalinggroups.yaml
@ -12,7 +12,7 @@ resources:
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_nodeimages.yaml
#- patches/webhook_in_nodeversions.yaml
#- patches/webhook_in_joiningnodes.yaml
#- patches/webhook_in_autoscalingstrategies.yaml
#- patches/webhook_in_scalinggroups.yaml
@ -21,7 +21,7 @@ patchesStrategicMerge:
# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_nodeimages.yaml
#- patches/cainjection_in_nodeversions.yaml
#- patches/cainjection_in_joiningnodes.yaml
#- patches/cainjection_in_autoscalingstrategies.yaml
#- patches/cainjection_in_scalinggroups.yaml

View File

@ -4,4 +4,4 @@ kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: nodeimages.update.edgeless.systems
name: nodeversions.update.edgeless.systems

View File

@ -2,7 +2,7 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: nodeimages.update.edgeless.systems
name: nodeversions.update.edgeless.systems
spec:
conversion:
strategy: Webhook

View File

@ -5,6 +5,12 @@ generatorOptions:
disableNameSuffixHash: true
configMapGenerator:
- name: manager-config
files:
- files:
- controller_manager_config.yaml
name: manager-config
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
newName: ghcr.io/edgelesssys/constellation/node-operator
newTag: v0.0.1

View File

@ -16,10 +16,10 @@ spec:
kind: AutoscalingStrategy
name: autoscalingstrategies.update.edgeless.systems
version: v1alpha1
- description: NodeImage is the Schema for the nodeimages API.
displayName: Node Image
kind: NodeImage
name: nodeimages.update.edgeless.systems
- description: NodeVersion is the Schema for the nodeversions API.
displayName: Node Version
kind: NodeVersion
name: nodeversions.update.edgeless.systems
version: v1alpha1
- description: PendingNode is the Schema for the pendingnodes API.
displayName: Pending Node

View File

@ -1,13 +1,13 @@
# permissions for end users to edit nodeimages.
# permissions for end users to edit nodeversions.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nodeimage-editor-role
name: nodeversion-editor-role
rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages
- nodeversions
verbs:
- create
- delete
@ -19,6 +19,6 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/status
- nodeversions/status
verbs:
- get

View File

@ -1,13 +1,13 @@
# permissions for end users to view nodeimages.
# permissions for end users to view nodeversions.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nodeimage-viewer-role
name: nodeversion-viewer-role
rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages
- nodeversions
verbs:
- get
- list
@ -15,6 +15,6 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/status
- nodeversions/status
verbs:
- get

View File

@ -101,7 +101,7 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimage
- nodeversion
verbs:
- get
- list
@ -109,7 +109,13 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages
- nodeversion/status
verbs:
- get
- apiGroups:
- update.edgeless.systems
resources:
- nodeversions
verbs:
- create
- delete
@ -121,13 +127,13 @@ rules:
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/finalizers
- nodeversions/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/status
- nodeversions/status
verbs:
- get
- patch

View File

@ -1,6 +1,6 @@
## Append samples you want in your CSV to this file as resources ##
resources:
- update_v1alpha1_nodeimage.yaml
- update_v1alpha1_nodeversion.yaml
- update_v1alpha1_autoscalingstrategy.yaml
- update_v1alpha1_scalinggroup.yaml
- update_v1alpha1_pendingnode.yaml

View File

@ -1,5 +1,5 @@
apiVersion: update.edgeless.systems/v1alpha1
kind: NodeImage
kind: NodeVersion
metadata:
name: constellation-os-azure
namespace: kube-system
@ -7,7 +7,7 @@ spec:
image: "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.Compute/galleries/<gallery-name>/images/<image-definition-name>/versions/<version>"
---
apiVersion: update.edgeless.systems/v1alpha1
kind: NodeImage
kind: NodeVersion
metadata:
name: constellation-os-gcp
namespace: kube-system

View File

@ -26,8 +26,8 @@ import (
)
const (
// NodeKubernetesComponentsHashAnnotationKey is the name of the annotation holding the hash of the installed components of this node.
NodeKubernetesComponentsHashAnnotationKey = "updates.edgeless.systems/kubernetes-components-hash"
// NodeKubernetesComponentsReferenceAnnotationKey is the name of the annotation holding the reference to the ConfigMap listing all K8s components.
NodeKubernetesComponentsReferenceAnnotationKey = "constellation.edgeless.systems/kubernetes-components"
joiningNodeNameKey = ".spec.name"
)
@ -76,7 +76,7 @@ func (r *JoiningNodesReconciler) Reconcile(ctx context.Context, req ctrl.Request
if node.Annotations == nil {
node.Annotations = map[string]string{}
}
node.Annotations[NodeKubernetesComponentsHashAnnotationKey] = joiningNode.Spec.ComponentsHash
node.Annotations[NodeKubernetesComponentsReferenceAnnotationKey] = joiningNode.Spec.ComponentsReference
return r.Update(ctx, &node)
})
if err != nil {

View File

@ -23,12 +23,12 @@ import (
var _ = Describe("JoiningNode controller", func() {
const (
nodeName1 = "node-name-1"
nodeName2 = "node-name-2"
nodeName3 = "node-name-3"
componentsHash1 = "test-hash-1"
componentsHash2 = "test-hash-2"
componentsHash3 = "test-hash-3"
nodeName1 = "node-name-1"
nodeName2 = "node-name-2"
nodeName3 = "node-name-3"
ComponentsReference1 = "test-ref-1"
ComponentsReference2 = "test-ref-2"
ComponentsReference3 = "test-ref-3"
timeout = time.Second * 20
duration = time.Second * 2
@ -47,8 +47,8 @@ var _ = Describe("JoiningNode controller", func() {
Name: nodeName1,
},
Spec: updatev1alpha1.JoiningNodeSpec{
Name: nodeName1,
ComponentsHash: componentsHash1,
Name: nodeName1,
ComponentsReference: ComponentsReference1,
},
}
Expect(k8sClient.Create(ctx, joiningNode)).Should(Succeed())
@ -57,7 +57,7 @@ var _ = Describe("JoiningNode controller", func() {
return k8sClient.Get(ctx, types.NamespacedName{Name: nodeName1}, createdJoiningNode)
}, timeout, interval).Should(Succeed())
Expect(createdJoiningNode.Spec.Name).Should(Equal(nodeName1))
Expect(createdJoiningNode.Spec.ComponentsHash).Should(Equal(componentsHash1))
Expect(createdJoiningNode.Spec.ComponentsReference).Should(Equal(ComponentsReference1))
By("creating a node")
node := &corev1.Node{
@ -80,8 +80,8 @@ var _ = Describe("JoiningNode controller", func() {
By("annotating the node")
Eventually(func() string {
_ = k8sClient.Get(ctx, types.NamespacedName{Name: nodeName1}, createdNode)
return createdNode.Annotations[NodeKubernetesComponentsHashAnnotationKey]
}, timeout, interval).Should(Equal(componentsHash1))
return createdNode.Annotations[NodeKubernetesComponentsReferenceAnnotationKey]
}, timeout, interval).Should(Equal(ComponentsReference1))
By("deleting the joining node resource")
Eventually(func() error {
@ -119,8 +119,8 @@ var _ = Describe("JoiningNode controller", func() {
Name: nodeName2,
},
Spec: updatev1alpha1.JoiningNodeSpec{
Name: nodeName2,
ComponentsHash: componentsHash2,
Name: nodeName2,
ComponentsReference: ComponentsReference2,
},
}
Expect(k8sClient.Create(ctx, joiningNode)).Should(Succeed())
@ -129,13 +129,13 @@ var _ = Describe("JoiningNode controller", func() {
return k8sClient.Get(ctx, types.NamespacedName{Name: joiningNode.Name}, createdJoiningNode)
}, timeout, interval).Should(Succeed())
Expect(createdJoiningNode.Spec.Name).Should(Equal(nodeName2))
Expect(createdJoiningNode.Spec.ComponentsHash).Should(Equal(componentsHash2))
Expect(createdJoiningNode.Spec.ComponentsReference).Should(Equal(ComponentsReference2))
By("annotating the node")
Eventually(func() string {
_ = k8sClient.Get(ctx, types.NamespacedName{Name: createdNode.Name}, createdNode)
return createdNode.Annotations[NodeKubernetesComponentsHashAnnotationKey]
}, timeout, interval).Should(Equal(componentsHash2))
return createdNode.Annotations[NodeKubernetesComponentsReferenceAnnotationKey]
}, timeout, interval).Should(Equal(ComponentsReference2))
By("deleting the joining node resource")
Eventually(func() error {
@ -154,8 +154,8 @@ var _ = Describe("JoiningNode controller", func() {
Name: nodeName3,
},
Spec: updatev1alpha1.JoiningNodeSpec{
Name: nodeName3,
ComponentsHash: componentsHash3,
Name: nodeName3,
ComponentsReference: ComponentsReference3,
// create without deadline first
},
}
@ -165,7 +165,7 @@ var _ = Describe("JoiningNode controller", func() {
return k8sClient.Get(ctx, types.NamespacedName{Name: joiningNode.Name}, createdJoiningNode)
}, timeout, interval).Should(Succeed())
Expect(createdJoiningNode.Spec.Name).Should(Equal(nodeName3))
Expect(createdJoiningNode.Spec.ComponentsHash).Should(Equal(componentsHash3))
Expect(createdJoiningNode.Spec.ComponentsReference).Should(Equal(ComponentsReference3))
By("setting the deadline to the past")
createdJoiningNode.Spec.Deadline = &metav1.Time{Time: fakes.clock.Now().Add(-time.Second)}

View File

@ -38,29 +38,29 @@ const (
// nodeJoinTimeout is the time limit pending nodes have to join the cluster before being terminated.
nodeJoinTimeout = time.Minute * 30
// nodeLeaveTimeout is the time limit pending nodes have to leave the cluster and being terminated.
nodeLeaveTimeout = time.Minute
donorAnnotation = "constellation.edgeless.systems/donor"
heirAnnotation = "constellation.edgeless.systems/heir"
scalingGroupAnnotation = "constellation.edgeless.systems/scaling-group-id"
nodeImageAnnotation = "constellation.edgeless.systems/node-image"
obsoleteAnnotation = "constellation.edgeless.systems/obsolete"
conditionNodeImageUpToDateReason = "NodeImagesUpToDate"
conditionNodeImageUpToDateMessage = "Node image of every node is up to date"
conditionNodeImageOutOfDateReason = "NodeImagesOutOfDate"
conditionNodeImageOutOfDateMessage = "Some node images are out of date"
nodeLeaveTimeout = time.Minute
donorAnnotation = "constellation.edgeless.systems/donor"
heirAnnotation = "constellation.edgeless.systems/heir"
scalingGroupAnnotation = "constellation.edgeless.systems/scaling-group-id"
nodeImageAnnotation = "constellation.edgeless.systems/node-image"
obsoleteAnnotation = "constellation.edgeless.systems/obsolete"
conditionNodeVersionUpToDateReason = "NodeVersionsUpToDate"
conditionNodeVersionUpToDateMessage = "Node version of every node is up to date"
conditionNodeVersionOutOfDateReason = "NodeVersionsOutOfDate"
conditionNodeVersionOutOfDateMessage = "Some node versions are out of date"
)
// NodeImageReconciler reconciles a NodeImage object.
type NodeImageReconciler struct {
// NodeVersionReconciler reconciles a NodeVersion object.
type NodeVersionReconciler struct {
nodeReplacer
etcdRemover
client.Client
Scheme *runtime.Scheme
}
// NewNodeImageReconciler creates a new NodeImageReconciler.
func NewNodeImageReconciler(nodeReplacer nodeReplacer, etcdRemover etcdRemover, client client.Client, scheme *runtime.Scheme) *NodeImageReconciler {
return &NodeImageReconciler{
// NewNodeVersionReconciler creates a new NodeVersionReconciler.
func NewNodeVersionReconciler(nodeReplacer nodeReplacer, etcdRemover etcdRemover, client client.Client, scheme *runtime.Scheme) *NodeVersionReconciler {
return &NodeVersionReconciler{
nodeReplacer: nodeReplacer,
etcdRemover: etcdRemover,
Client: client,
@ -68,20 +68,20 @@ func NewNodeImageReconciler(nodeReplacer nodeReplacer, etcdRemover etcdRemover,
}
}
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=nodeimages,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=nodeimages/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=nodeimages/finalizers,verbs=update
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=nodeversions,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=nodeversions/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=nodeversions/finalizers,verbs=update
//+kubebuilder:rbac:groups=nodemaintenance.medik8s.io,resources=nodemaintenances,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=nodes/status,verbs=get
// Reconcile replaces outdated nodes (using an old image) with new nodes (using a new image) as specified in the NodeImage spec.
func (r *NodeImageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
// Reconcile replaces outdated nodes (using an old image) with new nodes (using a new image) as specified in the NodeVersion spec.
func (r *NodeVersionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logr := log.FromContext(ctx)
logr.Info("Reconciling NodeImage")
logr.Info("Reconciling NodeVersion")
var desiredNodeImage updatev1alpha1.NodeImage
if err := r.Get(ctx, req.NamespacedName, &desiredNodeImage); err != nil {
var desiredNodeVersion updatev1alpha1.NodeVersion
if err := r.Get(ctx, req.NamespacedName, &desiredNodeVersion); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// get list of autoscaling strategies
@ -122,7 +122,7 @@ func (r *NodeImageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
scalingGroupByID[strings.ToLower(scalingGroup.Spec.GroupID)] = scalingGroup
}
annotatedNodes, invalidNodes := r.annotateNodes(ctx, nodeList.Items)
groups := groupNodes(annotatedNodes, pendingNodeList.Items, desiredNodeImage.Spec.ImageReference)
groups := groupNodes(annotatedNodes, pendingNodeList.Items, desiredNodeVersion.Spec.ImageReference, desiredNodeVersion.Spec.KubernetesComponentsReference)
logr.Info("Grouped nodes",
"outdatedNodes", len(groups.Outdated),
@ -147,7 +147,7 @@ func (r *NodeImageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
}
logr.Info("Budget for new nodes", "newNodesBudget", newNodesBudget)
status := nodeImageStatus(r.Scheme, groups, pendingNodeList.Items, invalidNodes, newNodesBudget)
status := nodeVersionStatus(r.Scheme, groups, pendingNodeList.Items, invalidNodes, newNodesBudget)
if err := r.tryUpdateStatus(ctx, req.NamespacedName, status); err != nil {
logr.Error(err, "Updating status")
}
@ -159,20 +159,20 @@ func (r *NodeImageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
}
if allNodesUpToDate {
logr.Info("All node images up to date")
logr.Info("All node versions up to date")
return ctrl.Result{}, nil
}
// should requeue is set if a node is deleted
var shouldRequeue bool
// find pairs of mint nodes and outdated nodes in the same scaling group to become donor & heir
replacementPairs := r.pairDonorsAndHeirs(ctx, &desiredNodeImage, groups.Outdated, groups.Mint)
replacementPairs := r.pairDonorsAndHeirs(ctx, &desiredNodeVersion, groups.Outdated, groups.Mint)
// extend replacement pairs to include existing pairs of donors and heirs
replacementPairs = r.matchDonorsAndHeirs(ctx, replacementPairs, groups.Donors, groups.Heirs)
// replace donor nodes by heirs
for _, pair := range replacementPairs {
logr.Info("Replacing node", "donorNode", pair.donor.Name, "heirNode", pair.heir.Name)
done, err := r.replaceNode(ctx, &desiredNodeImage, pair)
done, err := r.replaceNode(ctx, &desiredNodeVersion, pair)
if err != nil {
logr.Error(err, "Replacing node")
return ctrl.Result{}, err
@ -192,13 +192,13 @@ func (r *NodeImageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{Requeue: shouldRequeue}, nil
}
newNodeConfig := newNodeConfig{desiredNodeImage, groups.Outdated, pendingNodeList.Items, scalingGroupByID, newNodesBudget}
newNodeConfig := newNodeConfig{desiredNodeVersion, groups.Outdated, pendingNodeList.Items, scalingGroupByID, newNodesBudget}
if err := r.createNewNodes(ctx, newNodeConfig); err != nil {
return ctrl.Result{Requeue: shouldRequeue}, nil
}
// cleanup obsolete nodes
for _, node := range groups.Obsolete {
done, err := r.deleteNode(ctx, &desiredNodeImage, node)
done, err := r.deleteNode(ctx, &desiredNodeVersion, node)
if err != nil {
logr.Error(err, "Unable to remove obsolete node")
}
@ -211,9 +211,9 @@ func (r *NodeImageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
}
// SetupWithManager sets up the controller with the Manager.
func (r *NodeImageReconciler) SetupWithManager(mgr ctrl.Manager) error {
func (r *NodeVersionReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&updatev1alpha1.NodeImage{}).
For(&updatev1alpha1.NodeVersion{}).
Watches(
&source.Kind{Type: &updatev1alpha1.ScalingGroup{}},
handler.EnqueueRequestsFromMapFunc(r.findObjectsForScalingGroup),
@ -221,17 +221,17 @@ func (r *NodeImageReconciler) SetupWithManager(mgr ctrl.Manager) error {
).
Watches(
&source.Kind{Type: &updatev1alpha1.AutoscalingStrategy{}},
handler.EnqueueRequestsFromMapFunc(r.findAllNodeImages),
handler.EnqueueRequestsFromMapFunc(r.findAllNodeVersions),
builder.WithPredicates(autoscalerEnabledStatusChangedPredicate()),
).
Watches(
&source.Kind{Type: &corev1.Node{}},
handler.EnqueueRequestsFromMapFunc(r.findAllNodeImages),
handler.EnqueueRequestsFromMapFunc(r.findAllNodeVersions),
builder.WithPredicates(nodeReadyPredicate()),
).
Watches(
&source.Kind{Type: &nodemaintenancev1beta1.NodeMaintenance{}},
handler.EnqueueRequestsFromMapFunc(r.findAllNodeImages),
handler.EnqueueRequestsFromMapFunc(r.findAllNodeVersions),
builder.WithPredicates(nodeMaintenanceSucceededPredicate()),
).
Owns(&updatev1alpha1.PendingNode{}).
@ -239,7 +239,7 @@ func (r *NodeImageReconciler) SetupWithManager(mgr ctrl.Manager) error {
}
// annotateNodes takes all nodes of the cluster and annotates them with the scaling group they are in and the image they are using.
func (r *NodeImageReconciler) annotateNodes(ctx context.Context, nodes []corev1.Node) (annotatedNodes, invalidNodes []corev1.Node) {
func (r *NodeVersionReconciler) annotateNodes(ctx context.Context, nodes []corev1.Node) (annotatedNodes, invalidNodes []corev1.Node) {
logr := log.FromContext(ctx)
for _, node := range nodes {
annotations := make(map[string]string)
@ -285,7 +285,7 @@ func (r *NodeImageReconciler) annotateNodes(ctx context.Context, nodes []corev1.
// pairDonorsAndHeirs takes a list of outdated nodes (that do not yet have a heir node) and a list of mint nodes (nodes using the latest image) and pairs matching nodes to become donor and heir.
// outdatedNodes is also updated with heir annotations.
func (r *NodeImageReconciler) pairDonorsAndHeirs(ctx context.Context, controller metav1.Object, outdatedNodes []corev1.Node, mintNodes []mintNode) []replacementPair {
func (r *NodeVersionReconciler) pairDonorsAndHeirs(ctx context.Context, controller metav1.Object, outdatedNodes []corev1.Node, mintNodes []mintNode) []replacementPair {
logr := log.FromContext(ctx)
var pairs []replacementPair
for _, mintNode := range mintNodes {
@ -345,7 +345,7 @@ func (r *NodeImageReconciler) pairDonorsAndHeirs(ctx context.Context, controller
// matchDonorsAndHeirs takes separate lists of donors and heirs and matches each heir to its previously chosen donor.
// a list of replacement pairs is returned.
// donors and heirs with invalid pair references are cleaned up (the donor/heir annotations gets removed).
func (r *NodeImageReconciler) matchDonorsAndHeirs(ctx context.Context, pairs []replacementPair, donors, heirs []corev1.Node) []replacementPair {
func (r *NodeVersionReconciler) matchDonorsAndHeirs(ctx context.Context, pairs []replacementPair, donors, heirs []corev1.Node) []replacementPair {
logr := log.FromContext(ctx)
for _, heir := range heirs {
var foundPair bool
@ -389,7 +389,7 @@ func (r *NodeImageReconciler) matchDonorsAndHeirs(ctx context.Context, pairs []r
}
// ensureAutoscaling will ensure that the autoscaling is enabled or disabled as needed.
func (r *NodeImageReconciler) ensureAutoscaling(ctx context.Context, autoscalingEnabled bool, wantAutoscalingEnabled bool) error {
func (r *NodeVersionReconciler) ensureAutoscaling(ctx context.Context, autoscalingEnabled bool, wantAutoscalingEnabled bool) error {
if autoscalingEnabled == wantAutoscalingEnabled {
return nil
}
@ -418,7 +418,7 @@ func (r *NodeImageReconciler) ensureAutoscaling(ctx context.Context, autoscaling
// Labels are copied from the donor node to the heir node.
// Readiness of the heir node is awaited.
// Deletion of the donor node is scheduled.
func (r *NodeImageReconciler) replaceNode(ctx context.Context, controller metav1.Object, pair replacementPair) (bool, error) {
func (r *NodeVersionReconciler) replaceNode(ctx context.Context, controller metav1.Object, pair replacementPair) (bool, error) {
logr := log.FromContext(ctx)
if !reflect.DeepEqual(nodeutil.FilterLabels(pair.donor.Labels), nodeutil.FilterLabels(pair.heir.Labels)) {
if err := r.copyNodeLabels(ctx, pair.donor.Name, pair.heir.Name); err != nil {
@ -434,7 +434,7 @@ func (r *NodeImageReconciler) replaceNode(ctx context.Context, controller metav1
}
// deleteNode safely removes a node from the cluster and issues termination of the node by the CSP.
func (r *NodeImageReconciler) deleteNode(ctx context.Context, controller metav1.Object, node corev1.Node) (bool, error) {
func (r *NodeVersionReconciler) deleteNode(ctx context.Context, controller metav1.Object, node corev1.Node) (bool, error) {
logr := log.FromContext(ctx)
// cordon & drain node using node-maintenance-operator
var foundNodeMaintenance nodemaintenancev1beta1.NodeMaintenance
@ -509,7 +509,7 @@ func (r *NodeImageReconciler) deleteNode(ctx context.Context, controller metav1.
}
// createNewNodes creates new nodes using up to date images as replacement for outdated nodes.
func (r *NodeImageReconciler) createNewNodes(ctx context.Context, config newNodeConfig) error {
func (r *NodeVersionReconciler) createNewNodes(ctx context.Context, config newNodeConfig) error {
logr := log.FromContext(ctx)
if config.newNodesBudget < 1 || len(config.outdatedNodes) == 0 {
return nil
@ -543,8 +543,8 @@ func (r *NodeImageReconciler) createNewNodes(ctx context.Context, config newNode
logr.Info("Scaling group does not have matching resource", "scalingGroup", scalingGroupID, "scalingGroups", config.scalingGroupByID)
continue
}
if !strings.EqualFold(scalingGroup.Status.ImageReference, config.desiredNodeImage.Spec.ImageReference) {
logr.Info("Scaling group does not use latest image", "scalingGroup", scalingGroupID, "usedImage", scalingGroup.Status.ImageReference, "wantedImage", config.desiredNodeImage.Spec.ImageReference)
if !strings.EqualFold(scalingGroup.Status.ImageReference, config.desiredNodeVersion.Spec.ImageReference) {
logr.Info("Scaling group does not use latest image", "scalingGroup", scalingGroupID, "usedImage", scalingGroup.Status.ImageReference, "wantedImage", config.desiredNodeVersion.Spec.ImageReference)
continue
}
if requiredNodesPerScalingGroup[scalingGroupID] == 0 {
@ -573,7 +573,7 @@ func (r *NodeImageReconciler) createNewNodes(ctx context.Context, config newNode
Deadline: &deadline,
},
}
if err := ctrl.SetControllerReference(&config.desiredNodeImage, pendingNode, r.Scheme); err != nil {
if err := ctrl.SetControllerReference(&config.desiredNodeVersion, pendingNode, r.Scheme); err != nil {
return err
}
if err := r.Create(ctx, pendingNode); err != nil {
@ -588,7 +588,7 @@ func (r *NodeImageReconciler) createNewNodes(ctx context.Context, config newNode
}
// patchNodeAnnotations attempts to patch node annotations in a retry loop.
func (r *NodeImageReconciler) patchNodeAnnotations(ctx context.Context, nodeName string, annotations map[string]string) error {
func (r *NodeVersionReconciler) patchNodeAnnotations(ctx context.Context, nodeName string, annotations map[string]string) error {
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
var node corev1.Node
if err := r.Get(ctx, types.NamespacedName{Name: nodeName}, &node); err != nil {
@ -601,7 +601,7 @@ func (r *NodeImageReconciler) patchNodeAnnotations(ctx context.Context, nodeName
}
// patchNodeAnnotations attempts to remove node annotations using a patch in a retry loop.
func (r *NodeImageReconciler) patchUnsetNodeAnnotations(ctx context.Context, nodeName string, annotationKeys []string) error {
func (r *NodeVersionReconciler) patchUnsetNodeAnnotations(ctx context.Context, nodeName string, annotationKeys []string) error {
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
var node corev1.Node
if err := r.Get(ctx, types.NamespacedName{Name: nodeName}, &node); err != nil {
@ -614,7 +614,7 @@ func (r *NodeImageReconciler) patchUnsetNodeAnnotations(ctx context.Context, nod
}
// copyNodeLabels attempts to copy all node labels (except for reserved labels) from one node to another in a retry loop.
func (r *NodeImageReconciler) copyNodeLabels(ctx context.Context, oldNodeName, newNodeName string) error {
func (r *NodeVersionReconciler) copyNodeLabels(ctx context.Context, oldNodeName, newNodeName string) error {
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
var oldNode corev1.Node
if err := r.Get(ctx, types.NamespacedName{Name: oldNodeName}, &oldNode); err != nil {
@ -630,35 +630,35 @@ func (r *NodeImageReconciler) copyNodeLabels(ctx context.Context, oldNodeName, n
})
}
// tryUpdateStatus attempts to update the NodeImage status field in a retry loop.
func (r *NodeImageReconciler) tryUpdateStatus(ctx context.Context, name types.NamespacedName, status updatev1alpha1.NodeImageStatus) error {
// tryUpdateStatus attempts to update the NodeVersion status field in a retry loop.
func (r *NodeVersionReconciler) tryUpdateStatus(ctx context.Context, name types.NamespacedName, status updatev1alpha1.NodeVersionStatus) error {
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
var nodeImage updatev1alpha1.NodeImage
if err := r.Get(ctx, name, &nodeImage); err != nil {
var nodeVersion updatev1alpha1.NodeVersion
if err := r.Get(ctx, name, &nodeVersion); err != nil {
return err
}
nodeImage.Status = *status.DeepCopy()
if err := r.Status().Update(ctx, &nodeImage); err != nil {
nodeVersion.Status = *status.DeepCopy()
if err := r.Status().Update(ctx, &nodeVersion); err != nil {
return err
}
return nil
})
}
// nodeImageStatus generates the NodeImage.Status field given node groups and the budget for new nodes.
func nodeImageStatus(scheme *runtime.Scheme, groups nodeGroups, pendingNodes []updatev1alpha1.PendingNode, invalidNodes []corev1.Node, newNodesBudget int) updatev1alpha1.NodeImageStatus {
var status updatev1alpha1.NodeImageStatus
// nodeVersionStatus generates the NodeVersion.Status field given node groups and the budget for new nodes.
func nodeVersionStatus(scheme *runtime.Scheme, groups nodeGroups, pendingNodes []updatev1alpha1.PendingNode, invalidNodes []corev1.Node, newNodesBudget int) updatev1alpha1.NodeVersionStatus {
var status updatev1alpha1.NodeVersionStatus
outdatedCondition := metav1.Condition{
Type: updatev1alpha1.ConditionOutdated,
}
if len(groups.Outdated)+len(groups.Heirs)+len(pendingNodes)+len(groups.Obsolete) == 0 {
outdatedCondition.Status = metav1.ConditionFalse
outdatedCondition.Reason = conditionNodeImageUpToDateReason
outdatedCondition.Message = conditionNodeImageUpToDateMessage
outdatedCondition.Reason = conditionNodeVersionUpToDateReason
outdatedCondition.Message = conditionNodeVersionUpToDateMessage
} else {
outdatedCondition.Status = metav1.ConditionTrue
outdatedCondition.Reason = conditionNodeImageOutOfDateReason
outdatedCondition.Message = conditionNodeImageOutOfDateMessage
outdatedCondition.Reason = conditionNodeVersionOutOfDateReason
outdatedCondition.Message = conditionNodeVersionOutOfDateMessage
}
meta.SetStatusCondition(&status.Conditions, outdatedCondition)
for _, node := range groups.Outdated {
@ -739,20 +739,20 @@ type replacementPair struct {
// every properly annotated kubernetes node can be placed in exactly one of the sets.
type nodeGroups struct {
// Outdated nodes are nodes that
// do not use the most recent image AND
// do not use the most recent version AND
// are not yet a donor to an up to date heir node
Outdated,
// UpToDate nodes are nodes that
// use the most recent image,
// use the most recent version,
// are not an heir to an outdated donor node AND
// are not mint nodes
UpToDate,
// Donors are nodes that
// do not use the most recent image AND
// do not use the most recent version AND
// are paired up with an up to date heir node
Donors,
// Heirs are nodes that
// use the most recent image AND
// use the most recent version AND
// are paired up with an outdated donor node
Heirs,
// Obsolete nodes are nodes that
@ -761,21 +761,22 @@ type nodeGroups struct {
// They will be cleaned up by the operator.
Obsolete []corev1.Node
// Mint nodes are nodes that
// use the most recent image AND
// use the most recent version AND
// were created by the operator as replacements (heirs)
// and are awaiting pairing up with a donor node.
Mint []mintNode
}
// groupNodes classifies nodes by placing each into exactly one group.
func groupNodes(nodes []corev1.Node, pendingNodes []updatev1alpha1.PendingNode, latestImageReference string) nodeGroups {
func groupNodes(nodes []corev1.Node, pendingNodes []updatev1alpha1.PendingNode, latestImageReference string, latestK8sComponentsReference string) nodeGroups {
groups := nodeGroups{}
for _, node := range nodes {
if node.Annotations[obsoleteAnnotation] == "true" {
groups.Obsolete = append(groups.Obsolete, node)
continue
}
if !strings.EqualFold(node.Annotations[nodeImageAnnotation], latestImageReference) {
if !strings.EqualFold(node.Annotations[nodeImageAnnotation], latestImageReference) ||
!strings.EqualFold(node.Annotations[NodeKubernetesComponentsReferenceAnnotationKey], latestK8sComponentsReference) {
if heir := node.Annotations[heirAnnotation]; heir != "" {
groups.Donors = append(groups.Donors, node)
} else {
@ -816,9 +817,9 @@ type etcdRemover interface {
}
type newNodeConfig struct {
desiredNodeImage updatev1alpha1.NodeImage
outdatedNodes []corev1.Node
pendingNodes []updatev1alpha1.PendingNode
scalingGroupByID map[string]updatev1alpha1.ScalingGroup
newNodesBudget int
desiredNodeVersion updatev1alpha1.NodeVersion
outdatedNodes []corev1.Node
pendingNodes []updatev1alpha1.PendingNode
scalingGroupByID map[string]updatev1alpha1.ScalingGroup
newNodesBudget int
}

View File

@ -23,15 +23,15 @@ import (
nodemaintenancev1beta1 "github.com/medik8s/node-maintenance-operator/api/v1beta1"
)
var _ = Describe("NodeImage controller", func() {
var _ = Describe("NodeVersion controller", func() {
// Define utility constants for object names and testing timeouts/durations and intervals.
const (
nodeImageResourceName = "nodeimage"
firstNodeName = "node-1"
secondNodeName = "node-2"
firstImage = "image-1"
secondImage = "image-2"
scalingGroupID = "scaling-group"
nodeVersionResourceName = "nodeversion"
firstNodeName = "node-1"
secondNodeName = "node-2"
firstVersion = "version-1"
secondVersion = "version-2"
scalingGroupID = "scaling-group"
timeout = time.Second * 20
duration = time.Second * 2
@ -40,29 +40,29 @@ var _ = Describe("NodeImage controller", func() {
firstNodeLookupKey := types.NamespacedName{Name: firstNodeName}
secondNodeLookupKey := types.NamespacedName{Name: secondNodeName}
nodeImageLookupKey := types.NamespacedName{Name: nodeImageResourceName}
nodeVersionLookupKey := types.NamespacedName{Name: nodeVersionResourceName}
scalingGroupLookupKey := types.NamespacedName{Name: scalingGroupID}
joiningPendingNodeLookupKey := types.NamespacedName{Name: secondNodeName}
nodeMaintenanceLookupKey := types.NamespacedName{Name: firstNodeName}
Context("When updating the cluster-wide node image", func() {
Context("When updating the cluster-wide node version", func() {
It("Should update every node in the cluster", func() {
By("creating a node image resource specifying the first node image")
Expect(fakes.scalingGroupUpdater.SetScalingGroupImage(ctx, scalingGroupID, firstImage)).Should(Succeed())
nodeImage := &updatev1alpha1.NodeImage{
By("creating a node version resource specifying the first node version")
Expect(fakes.scalingGroupUpdater.SetScalingGroupImage(ctx, scalingGroupID, firstVersion)).Should(Succeed())
nodeVersion := &updatev1alpha1.NodeVersion{
TypeMeta: metav1.TypeMeta{
APIVersion: "update.edgeless.systems/v1alpha1",
Kind: "NodeImage",
Kind: "NodeVersion",
},
ObjectMeta: metav1.ObjectMeta{
Name: nodeImageResourceName,
Name: nodeVersionResourceName,
},
Spec: updatev1alpha1.NodeImageSpec{ImageReference: firstImage},
Spec: updatev1alpha1.NodeVersionSpec{ImageReference: firstVersion},
}
Expect(k8sClient.Create(ctx, nodeImage)).Should(Succeed())
Expect(k8sClient.Create(ctx, nodeVersion)).Should(Succeed())
By("creating a node resource using the first node image")
fakes.nodeReplacer.setNodeImage(firstNodeName, firstImage)
fakes.nodeReplacer.setNodeImage(firstNodeName, firstVersion)
fakes.nodeReplacer.setScalingGroupID(firstNodeName, scalingGroupID)
firstNode := &corev1.Node{
TypeMeta: metav1.TypeMeta{
@ -82,13 +82,13 @@ var _ = Describe("NodeImage controller", func() {
Expect(k8sClient.Create(ctx, firstNode)).Should(Succeed())
By("creating a scaling group resource using the first node image")
Expect(fakes.scalingGroupUpdater.SetScalingGroupImage(ctx, scalingGroupID, firstImage)).Should(Succeed())
Expect(fakes.scalingGroupUpdater.SetScalingGroupImage(ctx, scalingGroupID, firstVersion)).Should(Succeed())
scalingGroup := &updatev1alpha1.ScalingGroup{
ObjectMeta: metav1.ObjectMeta{
Name: scalingGroupID,
},
Spec: updatev1alpha1.ScalingGroupSpec{
NodeImage: nodeImageResourceName,
NodeVersion: nodeVersionResourceName,
GroupID: scalingGroupID,
Autoscaling: true,
},
@ -146,24 +146,24 @@ var _ = Describe("NodeImage controller", func() {
By("checking that all nodes are up-to-date")
Eventually(func() int {
if err := k8sClient.Get(ctx, nodeImageLookupKey, nodeImage); err != nil {
if err := k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion); err != nil {
return 0
}
return len(nodeImage.Status.UpToDate)
return len(nodeVersion.Status.UpToDate)
}, timeout, interval).Should(Equal(1))
By("updating the node image to the second image")
fakes.nodeStateGetter.setNodeState(updatev1alpha1.NodeStateReady)
fakes.nodeReplacer.setCreatedNode(secondNodeName, secondNodeName, nil)
nodeImage.Spec.ImageReference = secondImage
Expect(k8sClient.Update(ctx, nodeImage)).Should(Succeed())
nodeVersion.Spec.ImageReference = secondVersion
Expect(k8sClient.Update(ctx, nodeVersion)).Should(Succeed())
By("checking that there is an outdated node in the status")
Eventually(func() int {
if err := k8sClient.Get(ctx, nodeImageLookupKey, nodeImage); err != nil {
if err := k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion); err != nil {
return 0
}
return len(nodeImage.Status.Outdated)
return len(nodeVersion.Status.Outdated)
}, timeout, interval).Should(Equal(1))
By("checking that the scaling group is up to date")
@ -172,7 +172,7 @@ var _ = Describe("NodeImage controller", func() {
return ""
}
return scalingGroup.Status.ImageReference
}, timeout, interval).Should(Equal(secondImage))
}, timeout, interval).Should(Equal(secondVersion))
By("checking that a pending node is created")
pendingNode := &updatev1alpha1.PendingNode{}
@ -184,14 +184,14 @@ var _ = Describe("NodeImage controller", func() {
return pendingNode.Status.CSPNodeState
}).Should(Equal(updatev1alpha1.NodeStateReady))
Eventually(func() int {
if err := k8sClient.Get(ctx, nodeImageLookupKey, nodeImage); err != nil {
if err := k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion); err != nil {
return 0
}
return len(nodeImage.Status.Pending)
return len(nodeVersion.Status.Pending)
}, timeout, interval).Should(Equal(1))
By("creating a new node resource using the second node image")
fakes.nodeReplacer.setNodeImage(secondNodeName, secondImage)
fakes.nodeReplacer.setNodeImage(secondNodeName, secondVersion)
fakes.nodeReplacer.setScalingGroupID(secondNodeName, scalingGroupID)
secondNode := &corev1.Node{
TypeMeta: metav1.TypeMeta{
@ -214,7 +214,7 @@ var _ = Describe("NodeImage controller", func() {
}
return secondNode.Annotations
}, timeout, interval).Should(HaveKeyWithValue(scalingGroupAnnotation, scalingGroupID))
Expect(secondNode.Annotations).Should(HaveKeyWithValue(nodeImageAnnotation, secondImage))
Expect(secondNode.Annotations).Should(HaveKeyWithValue(nodeImageAnnotation, secondVersion))
By("checking that the nodes are paired as donor and heir")
Eventually(func() map[string]string {
@ -225,9 +225,9 @@ var _ = Describe("NodeImage controller", func() {
}, timeout, interval).Should(HaveKeyWithValue(heirAnnotation, secondNodeName))
Expect(k8sClient.Get(ctx, secondNodeLookupKey, secondNode)).Should(Succeed())
Expect(secondNode.Annotations).Should(HaveKeyWithValue(donorAnnotation, firstNodeName))
Expect(k8sClient.Get(ctx, nodeImageLookupKey, nodeImage)).Should(Succeed())
Expect(nodeImage.Status.Donors).Should(HaveLen(1))
Expect(nodeImage.Status.Heirs).Should(HaveLen(1))
Expect(k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion)).Should(Succeed())
Expect(nodeVersion.Status.Donors).Should(HaveLen(1))
Expect(nodeVersion.Status.Heirs).Should(HaveLen(1))
Expect(k8sClient.Get(ctx, joiningPendingNodeLookupKey, pendingNode)).Should(Not(Succeed()))
By("checking that node labels are copied to the heir")
@ -268,15 +268,15 @@ var _ = Describe("NodeImage controller", func() {
By("checking that all nodes are up-to-date")
Eventually(func() int {
err := k8sClient.Get(ctx, nodeImageLookupKey, nodeImage)
err := k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion)
if err != nil {
return 0
}
return len(nodeImage.Status.UpToDate)
return len(nodeVersion.Status.UpToDate)
}, timeout, interval).Should(Equal(1))
By("cleaning up all resources")
Expect(k8sClient.Delete(ctx, nodeImage)).Should(Succeed())
Expect(k8sClient.Delete(ctx, nodeVersion)).Should(Succeed())
Expect(k8sClient.Delete(ctx, scalingGroup)).Should(Succeed())
Expect(k8sClient.Delete(ctx, autoscalerDeployment)).Should(Succeed())
Expect(k8sClient.Delete(ctx, strategy)).Should(Succeed())

View File

@ -107,7 +107,7 @@ func TestAnnotateNodes(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
reconciler := NodeImageReconciler{
reconciler := NodeVersionReconciler{
nodeReplacer: &stubNodeReplacerReader{
nodeImage: "node-image",
scalingGroupID: "scaling-group-id",
@ -217,13 +217,13 @@ func TestPairDonorsAndHeirs(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
reconciler := NodeImageReconciler{
reconciler := NodeVersionReconciler{
nodeReplacer: &stubNodeReplacerReader{},
Client: &stubReadWriterClient{
stubReaderClient: *newStubReaderClient(t, []runtime.Object{&tc.outdatedNode, &tc.mintNode.node}, nil, nil),
},
}
nodeImage := updatev1alpha1.NodeImage{}
nodeImage := updatev1alpha1.NodeVersion{}
pairs := reconciler.pairDonorsAndHeirs(context.Background(), &nodeImage, []corev1.Node{tc.outdatedNode}, []mintNode{tc.mintNode})
if tc.wantPair == nil {
assert.Len(pairs, 0)
@ -307,7 +307,7 @@ func TestMatchDonorsAndHeirs(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
reconciler := NodeImageReconciler{
reconciler := NodeVersionReconciler{
nodeReplacer: &stubNodeReplacerReader{},
Client: &stubReadWriterClient{
stubReaderClient: *newStubReaderClient(t, []runtime.Object{&tc.donor, &tc.heir}, nil, nil),
@ -578,12 +578,12 @@ func TestCreateNewNodes(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
desiredNodeImage := updatev1alpha1.NodeImage{
Spec: updatev1alpha1.NodeImageSpec{
desiredNodeImage := updatev1alpha1.NodeVersion{
Spec: updatev1alpha1.NodeVersionSpec{
ImageReference: "image",
},
}
reconciler := NodeImageReconciler{
reconciler := NodeVersionReconciler{
nodeReplacer: &stubNodeReplacerWriter{},
Client: &stubReadWriterClient{
stubReaderClient: *newStubReaderClient(t, []runtime.Object{}, nil, nil),
@ -600,6 +600,7 @@ func TestCreateNewNodes(t *testing.T) {
func TestGroupNodes(t *testing.T) {
latestImageReference := "latest-image"
latestK8sComponentsReference := "latest-k8s-components-ref"
scalingGroup := "scaling-group"
wantNodeGroups := nodeGroups{
Outdated: []corev1.Node{
@ -607,8 +608,19 @@ func TestGroupNodes(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "outdated",
Annotations: map[string]string{
scalingGroupAnnotation: scalingGroup,
nodeImageAnnotation: "old-image",
scalingGroupAnnotation: scalingGroup,
NodeKubernetesComponentsReferenceAnnotationKey: latestK8sComponentsReference,
nodeImageAnnotation: "old-image",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "outdated",
Annotations: map[string]string{
scalingGroupAnnotation: scalingGroup,
NodeKubernetesComponentsReferenceAnnotationKey: "old-ref",
nodeImageAnnotation: latestImageReference,
},
},
},
@ -618,8 +630,9 @@ func TestGroupNodes(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "uptodate",
Annotations: map[string]string{
scalingGroupAnnotation: scalingGroup,
nodeImageAnnotation: latestImageReference,
scalingGroupAnnotation: scalingGroup,
nodeImageAnnotation: latestImageReference,
NodeKubernetesComponentsReferenceAnnotationKey: latestK8sComponentsReference,
},
},
},
@ -629,9 +642,21 @@ func TestGroupNodes(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "donor",
Annotations: map[string]string{
scalingGroupAnnotation: scalingGroup,
nodeImageAnnotation: "old-image",
heirAnnotation: "heir",
scalingGroupAnnotation: scalingGroup,
nodeImageAnnotation: "old-image",
NodeKubernetesComponentsReferenceAnnotationKey: latestK8sComponentsReference,
heirAnnotation: "heir",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "donor",
Annotations: map[string]string{
scalingGroupAnnotation: scalingGroup,
nodeImageAnnotation: latestImageReference,
NodeKubernetesComponentsReferenceAnnotationKey: "old-ref",
heirAnnotation: "heir",
},
},
},
@ -641,9 +666,10 @@ func TestGroupNodes(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "heir",
Annotations: map[string]string{
scalingGroupAnnotation: scalingGroup,
nodeImageAnnotation: latestImageReference,
donorAnnotation: "donor",
scalingGroupAnnotation: scalingGroup,
nodeImageAnnotation: latestImageReference,
NodeKubernetesComponentsReferenceAnnotationKey: latestK8sComponentsReference,
donorAnnotation: "donor",
},
},
},
@ -653,9 +679,10 @@ func TestGroupNodes(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "obsolete",
Annotations: map[string]string{
scalingGroupAnnotation: scalingGroup,
nodeImageAnnotation: latestImageReference,
obsoleteAnnotation: "true",
scalingGroupAnnotation: scalingGroup,
nodeImageAnnotation: latestImageReference,
NodeKubernetesComponentsReferenceAnnotationKey: latestK8sComponentsReference,
obsoleteAnnotation: "true",
},
},
},
@ -666,8 +693,9 @@ func TestGroupNodes(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "mint",
Annotations: map[string]string{
scalingGroupAnnotation: scalingGroup,
nodeImageAnnotation: latestImageReference,
scalingGroupAnnotation: scalingGroup,
nodeImageAnnotation: latestImageReference,
NodeKubernetesComponentsReferenceAnnotationKey: latestK8sComponentsReference,
},
},
},
@ -695,7 +723,7 @@ func TestGroupNodes(t *testing.T) {
}
assert := assert.New(t)
groups := groupNodes(nodes, pendingNodes, latestImageReference)
groups := groupNodes(nodes, pendingNodes, latestImageReference, latestK8sComponentsReference)
assert.Equal(wantNodeGroups, groups)
}

View File

@ -94,22 +94,22 @@ func nodeMaintenanceSucceededPredicate() predicate.Predicate {
}
// findObjectsForScalingGroup requests a reconcile call for the node image referenced by a scaling group.
func (r *NodeImageReconciler) findObjectsForScalingGroup(rawScalingGroup client.Object) []reconcile.Request {
func (r *NodeVersionReconciler) findObjectsForScalingGroup(rawScalingGroup client.Object) []reconcile.Request {
scalingGroup := rawScalingGroup.(*updatev1alpha1.ScalingGroup)
return []reconcile.Request{
{NamespacedName: types.NamespacedName{Name: scalingGroup.Spec.NodeImage}},
{NamespacedName: types.NamespacedName{Name: scalingGroup.Spec.NodeVersion}},
}
}
// findAllNodeImages requests a reconcile call for all node images.
func (r *NodeImageReconciler) findAllNodeImages(_ client.Object) []reconcile.Request {
var nodeImageList updatev1alpha1.NodeImageList
err := r.List(context.TODO(), &nodeImageList)
// findAllNodeVersions requests a reconcile call for all node versions.
func (r *NodeVersionReconciler) findAllNodeVersions(_ client.Object) []reconcile.Request {
var nodeVersionList updatev1alpha1.NodeVersionList
err := r.List(context.TODO(), &nodeVersionList)
if err != nil {
return []reconcile.Request{}
}
requests := make([]reconcile.Request, len(nodeImageList.Items))
for i, item := range nodeImageList.Items {
requests := make([]reconcile.Request, len(nodeVersionList.Items))
for i, item := range nodeVersionList.Items {
requests[i] = reconcile.Request{
NamespacedName: types.NamespacedName{Name: item.GetName()},
}

View File

@ -237,39 +237,39 @@ func TestNodeMaintenanceSucceededPredicate(t *testing.T) {
func TestFindObjectsForScalingGroup(t *testing.T) {
scalingGroup := updatev1alpha1.ScalingGroup{
Spec: updatev1alpha1.ScalingGroupSpec{
NodeImage: "nodeimage",
NodeVersion: "nodeversion",
},
}
wantRequests := []reconcile.Request{
{
NamespacedName: types.NamespacedName{
Name: "nodeimage",
Name: "nodeversion",
},
},
}
assert := assert.New(t)
reconciler := NodeImageReconciler{}
reconciler := NodeVersionReconciler{}
requests := reconciler.findObjectsForScalingGroup(&scalingGroup)
assert.ElementsMatch(wantRequests, requests)
}
func TestFindAllNodeImages(t *testing.T) {
func TestFindAllNodeVersions(t *testing.T) {
testCases := map[string]struct {
nodeImage client.Object
listNodeImagesErr error
wantRequests []reconcile.Request
nodeVersion client.Object
listNodeVersionsErr error
wantRequests []reconcile.Request
}{
"getting the corresponding node images fails": {
listNodeImagesErr: errors.New("get-node-images-err"),
listNodeVersionsErr: errors.New("get-node-version-err"),
},
"node image reconcile request is returned": {
nodeImage: &updatev1alpha1.NodeImage{
ObjectMeta: metav1.ObjectMeta{Name: "nodeimage"},
nodeVersion: &updatev1alpha1.NodeVersion{
ObjectMeta: metav1.ObjectMeta{Name: "nodeversion"},
},
wantRequests: []reconcile.Request{
{
NamespacedName: types.NamespacedName{
Name: "nodeimage",
Name: "nodeversion",
},
},
},
@ -280,10 +280,10 @@ func TestFindAllNodeImages(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
reconciler := NodeImageReconciler{
Client: newStubReaderClient(t, []runtime.Object{tc.nodeImage}, nil, tc.listNodeImagesErr),
reconciler := NodeVersionReconciler{
Client: newStubReaderClient(t, []runtime.Object{tc.nodeVersion}, nil, tc.listNodeVersionsErr),
}
requests := reconciler.findAllNodeImages(nil)
requests := reconciler.findAllNodeVersions(nil)
assert.ElementsMatch(tc.wantRequests, requests)
})
}

View File

@ -28,7 +28,7 @@ import (
)
const (
nodeImageField = ".spec.nodeImage"
nodeVersionField = ".spec.nodeVersion"
conditionScalingGroupUpToDateReason = "ScalingGroupNodeImageUpToDate"
conditionScalingGroupUpToDateMessage = "Scaling group will use the latest image when creating new nodes"
conditionScalingGroupOutOfDateReason = "ScalingGroupNodeImageOutOfDate"
@ -54,10 +54,10 @@ func NewScalingGroupReconciler(scalingGroupUpdater scalingGroupUpdater, client c
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=scalinggroups,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=scalinggroups/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=scalinggroups/finalizers,verbs=update
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=nodeimage,verbs=get;list;watch
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=nodeimages/status,verbs=get
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=nodeversion,verbs=get;list;watch
//+kubebuilder:rbac:groups=update.edgeless.systems,resources=nodeversion/status,verbs=get
// Reconcile reads the latest node image from the referenced NodeImage spec and updates the scaling group to match.
// Reconcile reads the latest node image from the referenced NodeVersion spec and updates the scaling group to match.
func (r *ScalingGroupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logr := log.FromContext(ctx)
@ -66,9 +66,9 @@ func (r *ScalingGroupReconciler) Reconcile(ctx context.Context, req ctrl.Request
logr.Error(err, "Unable to fetch ScalingGroup")
return ctrl.Result{}, client.IgnoreNotFound(err)
}
var desiredNodeImage updatev1alpha1.NodeImage
if err := r.Get(ctx, client.ObjectKey{Name: desiredScalingGroup.Spec.NodeImage}, &desiredNodeImage); err != nil {
logr.Error(err, "Unable to fetch NodeImage")
var desiredNodeVersion updatev1alpha1.NodeVersion
if err := r.Get(ctx, client.ObjectKey{Name: desiredScalingGroup.Spec.NodeVersion}, &desiredNodeVersion); err != nil {
logr.Error(err, "Unable to fetch NodeVersion")
return ctrl.Result{}, client.IgnoreNotFound(err)
}
nodeImage, err := r.scalingGroupUpdater.GetScalingGroupImage(ctx, desiredScalingGroup.Spec.GroupID)
@ -81,7 +81,7 @@ func (r *ScalingGroupReconciler) Reconcile(ctx context.Context, req ctrl.Request
outdatedCondition := metav1.Condition{
Type: updatev1alpha1.ConditionOutdated,
}
imagesMatch := strings.EqualFold(nodeImage, desiredNodeImage.Spec.ImageReference)
imagesMatch := strings.EqualFold(nodeImage, desiredNodeVersion.Spec.ImageReference)
if imagesMatch {
outdatedCondition.Status = metav1.ConditionFalse
outdatedCondition.Reason = conditionScalingGroupUpToDateReason
@ -99,7 +99,7 @@ func (r *ScalingGroupReconciler) Reconcile(ctx context.Context, req ctrl.Request
if !imagesMatch {
logr.Info("ScalingGroup NodeImage is out of date")
if err := r.scalingGroupUpdater.SetScalingGroupImage(ctx, desiredScalingGroup.Spec.GroupID, desiredNodeImage.Spec.ImageReference); err != nil {
if err := r.scalingGroupUpdater.SetScalingGroupImage(ctx, desiredScalingGroup.Spec.GroupID, desiredNodeVersion.Spec.ImageReference); err != nil {
logr.Error(err, "Unable to set ScalingGroup NodeImage")
return ctrl.Result{}, err
}
@ -111,31 +111,31 @@ func (r *ScalingGroupReconciler) Reconcile(ctx context.Context, req ctrl.Request
// SetupWithManager sets up the controller with the Manager.
func (r *ScalingGroupReconciler) SetupWithManager(mgr ctrl.Manager) error {
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &updatev1alpha1.ScalingGroup{}, nodeImageField, func(rawObj client.Object) []string {
// Extract the NodeImage name from the ScalingGroup Spec, if one is provided
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &updatev1alpha1.ScalingGroup{}, nodeVersionField, func(rawObj client.Object) []string {
// Extract the NodeVersion name from the ScalingGroup Spec, if one is provided
scalingGroup := rawObj.(*updatev1alpha1.ScalingGroup)
if scalingGroup.Spec.NodeImage == "" {
if scalingGroup.Spec.NodeVersion == "" {
return nil
}
return []string{scalingGroup.Spec.NodeImage}
return []string{scalingGroup.Spec.NodeVersion}
}); err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr).
For(&updatev1alpha1.ScalingGroup{}).
Watches(
&source.Kind{Type: &updatev1alpha1.NodeImage{}},
handler.EnqueueRequestsFromMapFunc(r.findObjectsForNodeImage),
&source.Kind{Type: &updatev1alpha1.NodeVersion{}},
handler.EnqueueRequestsFromMapFunc(r.findObjectsForNodeVersion),
builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}),
).
Complete(r)
}
// findObjectsForNodeImage requests reconcile calls for every scaling group referencing the node image.
func (r *ScalingGroupReconciler) findObjectsForNodeImage(nodeImage client.Object) []reconcile.Request {
// findObjectsForNodeVersion requests reconcile calls for every scaling group referencing the node image.
func (r *ScalingGroupReconciler) findObjectsForNodeVersion(nodeVersion client.Object) []reconcile.Request {
attachedScalingGroups := &updatev1alpha1.ScalingGroupList{}
listOps := &client.ListOptions{
FieldSelector: fields.OneTermEqualSelector(nodeImageField, nodeImage.GetName()),
FieldSelector: fields.OneTermEqualSelector(nodeVersionField, nodeVersion.GetName()),
}
if err := r.List(context.TODO(), attachedScalingGroups, listOps); err != nil {
return []reconcile.Request{}

View File

@ -23,7 +23,7 @@ import (
var _ = Describe("ScalingGroup controller", func() {
// Define utility constants for object names and testing timeouts/durations and intervals.
const (
nodeImageName = "node-image"
nodeVersionName = "node-version"
scalingGroupName = "test-group"
timeout = time.Second * 20
@ -31,30 +31,30 @@ var _ = Describe("ScalingGroup controller", func() {
interval = time.Millisecond * 250
)
nodeImageLookupKey := types.NamespacedName{Name: nodeImageName}
nodeVersionLookupKey := types.NamespacedName{Name: nodeVersionName}
Context("When changing a node image resource spec", func() {
It("Should update corresponding scaling group images", func() {
By("creating a node image resource")
ctx := context.Background()
nodeImage := &updatev1alpha1.NodeImage{
nodeVersion := &updatev1alpha1.NodeVersion{
TypeMeta: metav1.TypeMeta{
APIVersion: "update.edgeless.systems/v1alpha1",
Kind: "NodeImage",
Kind: "NodeVersion",
},
ObjectMeta: metav1.ObjectMeta{
Name: nodeImageName,
Name: nodeVersionName,
},
Spec: updatev1alpha1.NodeImageSpec{
Spec: updatev1alpha1.NodeVersionSpec{
ImageReference: "image-1",
},
}
Expect(k8sClient.Create(ctx, nodeImage)).Should(Succeed())
createdNodeImage := &updatev1alpha1.NodeImage{}
Expect(k8sClient.Create(ctx, nodeVersion)).Should(Succeed())
createdNodeVersion := &updatev1alpha1.NodeVersion{}
Eventually(func() error {
return k8sClient.Get(ctx, nodeImageLookupKey, createdNodeImage)
return k8sClient.Get(ctx, nodeVersionLookupKey, createdNodeVersion)
}, timeout, interval).Should(Succeed())
Expect(createdNodeImage.Spec.ImageReference).Should(Equal("image-1"))
Expect(createdNodeVersion.Spec.ImageReference).Should(Equal("image-1"))
By("creating a scaling group")
scalingGroup := &updatev1alpha1.ScalingGroup{
@ -66,8 +66,8 @@ var _ = Describe("ScalingGroup controller", func() {
Name: scalingGroupName,
},
Spec: updatev1alpha1.ScalingGroupSpec{
NodeImage: nodeImageName,
GroupID: "group-id",
NodeVersion: nodeVersionName,
GroupID: "group-id",
},
}
Expect(k8sClient.Create(ctx, scalingGroup)).Should(Succeed())
@ -98,9 +98,9 @@ var _ = Describe("ScalingGroup controller", func() {
}, duration, interval).Should(Equal("image-1"))
By("updating the node image")
Expect(k8sClient.Get(ctx, nodeImageLookupKey, nodeImage)).Should(Succeed())
nodeImage.Spec.ImageReference = "image-2"
Expect(k8sClient.Update(ctx, nodeImage)).Should(Succeed())
Expect(k8sClient.Get(ctx, nodeVersionLookupKey, nodeVersion)).Should(Succeed())
nodeVersion.Spec.ImageReference = "image-2"
Expect(k8sClient.Update(ctx, nodeVersion)).Should(Succeed())
By("checking the scaling group eventually uses the latest image")
Eventually(func() string {
@ -118,7 +118,7 @@ var _ = Describe("ScalingGroup controller", func() {
}, duration, interval).Should(Equal("image-2"))
By("cleaning up all resources")
Expect(k8sClient.Delete(ctx, createdNodeImage)).Should(Succeed())
Expect(k8sClient.Delete(ctx, createdNodeVersion)).Should(Succeed())
Expect(k8sClient.Delete(ctx, scalingGroup)).Should(Succeed())
})
})

View File

@ -115,7 +115,7 @@ var _ = BeforeSuite(func() {
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
err = (&NodeImageReconciler{
err = (&NodeVersionReconciler{
nodeReplacer: fakes.nodeReplacer,
Client: k8sManager.GetClient(),
Scheme: k8sManager.GetScheme(),

View File

@ -9,8 +9,8 @@ package constants
const (
// AutoscalingStrategyResourceName resource name used for AutoscalingStrategy.
AutoscalingStrategyResourceName = "autoscalingstrategy"
// NodeImageResourceName resource name used for NodeImage.
NodeImageResourceName = "constellation-os"
// NodeVersionResourceName resource name used for NodeVersion.
NodeVersionResourceName = "constellation-version"
// ControlPlaneScalingGroupResourceName resource name used for ControlPlaneScalingGroup.
ControlPlaneScalingGroupResourceName = "scalinggroup-controlplane"
// WorkerScalingGroupResourceName resource name used for WorkerScaling.

View File

@ -12,9 +12,11 @@ import (
"errors"
"fmt"
"strings"
"time"
updatev1alpha1 "github.com/edgelesssys/constellation/operators/constellation-node-operator/v2/api/v1alpha1"
"github.com/edgelesssys/constellation/operators/constellation-node-operator/v2/internal/constants"
corev1 "k8s.io/api/core/v1"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
@ -22,7 +24,7 @@ import (
)
// InitialResources creates the initial resources for the node operator.
func InitialResources(ctx context.Context, k8sClient client.Writer, imageInfo imageInfoGetter, scalingGroupGetter scalingGroupGetter, uid string) error {
func InitialResources(ctx context.Context, k8sClient client.Client, imageInfo imageInfoGetter, scalingGroupGetter scalingGroupGetter, uid string) error {
logr := log.FromContext(ctx)
controlPlaneGroupIDs, workerGroupIDs, err := scalingGroupGetter.ListScalingGroups(ctx, uid)
if err != nil {
@ -50,8 +52,8 @@ func InitialResources(ctx context.Context, k8sClient client.Writer, imageInfo im
imageVersion = ""
}
if err := createNodeImage(ctx, k8sClient, imageReference, imageVersion); err != nil {
return fmt.Errorf("creating initial node image %q: %w", imageReference, err)
if err := createNodeVersion(ctx, k8sClient, imageReference, imageVersion); err != nil {
return fmt.Errorf("creating initial node version %q: %w", imageReference, err)
}
for _, groupID := range controlPlaneGroupIDs {
groupName, err := scalingGroupGetter.GetScalingGroupName(groupID)
@ -110,22 +112,61 @@ func createAutoscalingStrategy(ctx context.Context, k8sClient client.Writer, pro
return err
}
// createNodeImage creates the initial nodeimage resource if it does not exist yet.
func createNodeImage(ctx context.Context, k8sClient client.Writer, imageReference, imageVersion string) error {
err := k8sClient.Create(ctx, &updatev1alpha1.NodeImage{
TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "NodeImage"},
// createNodeVersion creates the initial nodeversion resource if it does not exist yet.
func createNodeVersion(ctx context.Context, k8sClient client.Client, imageReference, imageVersion string) error {
k8sComponentsRef, err := findLatestK8sComponentsConfigMap(ctx, k8sClient)
if err != nil {
return fmt.Errorf("finding latest k8s-components configmap: %w", err)
}
err = k8sClient.Create(ctx, &updatev1alpha1.NodeVersion{
TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "NodeVersion"},
ObjectMeta: metav1.ObjectMeta{
Name: constants.NodeImageResourceName,
Name: constants.NodeVersionResourceName,
},
Spec: updatev1alpha1.NodeImageSpec{
ImageReference: imageReference,
ImageVersion: imageVersion,
Spec: updatev1alpha1.NodeVersionSpec{
ImageReference: imageReference,
ImageVersion: imageVersion,
KubernetesComponentsReference: k8sComponentsRef,
},
})
if k8sErrors.IsAlreadyExists(err) {
return nil
} else if err != nil {
return err
}
return err
return nil
}
// findLatestK8sComponentsConfigMap returns the name of the most recently created
// ConfigMap in the kube-system namespace whose name has the prefix "k8s-components".
// If several ConfigMaps match, the one with the newest creation timestamp wins.
// It returns an error if listing fails or if no matching ConfigMap exists.
func findLatestK8sComponentsConfigMap(ctx context.Context, k8sClient client.Client) (string, error) {
	var configMaps corev1.ConfigMapList
	if err := k8sClient.List(ctx, &configMaps, client.InNamespace("kube-system")); err != nil {
		return "", fmt.Errorf("listing configmaps: %w", err)
	}

	// Single pass: track the newest matching ConfigMap directly instead of
	// collecting candidates into an intermediate map first.
	// The explicit latestName == "" check ensures the first match is taken even
	// when its CreationTimestamp is the zero time (e.g. objects created by fake
	// clients in tests), which a bare After() comparison would silently skip.
	var latestName string
	var latestTime time.Time
	for i := range configMaps.Items {
		name := configMaps.Items[i].Name
		if !strings.HasPrefix(name, "k8s-components") {
			continue
		}
		created := configMaps.Items[i].CreationTimestamp.Time
		if latestName == "" || created.After(latestTime) {
			latestName = name
			latestTime = created
		}
	}
	if latestName == "" {
		return "", errors.New("no configmaps found")
	}
	return latestName, nil
}
// createScalingGroup creates an initial scaling group resource if it does not exist yet.
@ -136,7 +177,7 @@ func createScalingGroup(ctx context.Context, config newScalingGroupConfig) error
Name: strings.ToLower(config.groupName),
},
Spec: updatev1alpha1.ScalingGroupSpec{
NodeImage: constants.NodeImageResourceName,
NodeVersion: constants.NodeVersionResourceName,
GroupID: config.groupID,
AutoscalerGroupName: config.autoscalingGroupName,
Min: 1,

View File

@ -10,18 +10,22 @@ import (
"context"
"errors"
"testing"
"time"
updatev1alpha1 "github.com/edgelesssys/constellation/operators/constellation-node-operator/v2/api/v1alpha1"
"github.com/edgelesssys/constellation/operators/constellation-node-operator/v2/internal/constants"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func TestInitialResources(t *testing.T) {
k8sComponentsReference := "k8s-components-sha256-ABC"
testCases := map[string]struct {
items []scalingGroupStoreItem
imageErr error
@ -85,7 +89,16 @@ func TestInitialResources(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
k8sClient := &stubK8sClient{createErr: tc.createErr}
k8sClient := &fakeK8sClient{
createErr: tc.createErr,
listConfigMaps: []corev1.ConfigMap{
{
ObjectMeta: metav1.ObjectMeta{
Name: k8sComponentsReference,
},
},
},
}
scalingGroupGetter := newScalingGroupGetter(tc.items, tc.imageErr, tc.nameErr, tc.listErr)
err := InitialResources(context.Background(), k8sClient, &stubImageInfo{}, scalingGroupGetter, "uid")
if tc.wantErr {
@ -156,7 +169,7 @@ func TestCreateAutoscalingStrategy(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
k8sClient := &stubK8sClient{createErr: tc.createErr}
k8sClient := &fakeK8sClient{createErr: tc.createErr}
err := createAutoscalingStrategy(context.Background(), k8sClient, "stub")
if tc.wantErr {
assert.Error(err)
@ -169,21 +182,24 @@ func TestCreateAutoscalingStrategy(t *testing.T) {
}
}
func TestCreateNodeImage(t *testing.T) {
func TestCreateNodeVersion(t *testing.T) {
k8sComponentsReference := "k8s-components-sha256-reference"
testCases := map[string]struct {
createErr error
wantNodeImage *updatev1alpha1.NodeImage
wantErr bool
createErr error
existingNodeVersion *updatev1alpha1.NodeVersion
wantNodeVersion *updatev1alpha1.NodeVersion
wantErr bool
}{
"create works": {
wantNodeImage: &updatev1alpha1.NodeImage{
TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "NodeImage"},
wantNodeVersion: &updatev1alpha1.NodeVersion{
TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "NodeVersion"},
ObjectMeta: metav1.ObjectMeta{
Name: constants.NodeImageResourceName,
Name: constants.NodeVersionResourceName,
},
Spec: updatev1alpha1.NodeImageSpec{
ImageReference: "image-reference",
ImageVersion: "image-version",
Spec: updatev1alpha1.NodeVersionSpec{
ImageReference: "image-reference",
ImageVersion: "image-version",
KubernetesComponentsReference: k8sComponentsReference,
},
},
},
@ -191,16 +207,28 @@ func TestCreateNodeImage(t *testing.T) {
createErr: errors.New("create failed"),
wantErr: true,
},
"image exists": {
createErr: k8sErrors.NewAlreadyExists(schema.GroupResource{}, constants.AutoscalingStrategyResourceName),
wantNodeImage: &updatev1alpha1.NodeImage{
TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "NodeImage"},
"version exists": {
createErr: k8sErrors.NewAlreadyExists(schema.GroupResource{}, constants.NodeVersionResourceName),
existingNodeVersion: &updatev1alpha1.NodeVersion{
TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "NodeVersion"},
ObjectMeta: metav1.ObjectMeta{
Name: constants.NodeImageResourceName,
Name: constants.NodeVersionResourceName,
},
Spec: updatev1alpha1.NodeImageSpec{
ImageReference: "image-reference",
ImageVersion: "image-version",
Spec: updatev1alpha1.NodeVersionSpec{
ImageReference: "image-reference2",
ImageVersion: "image-version2",
KubernetesComponentsReference: "components-reference2",
},
},
wantNodeVersion: &updatev1alpha1.NodeVersion{
TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "NodeVersion"},
ObjectMeta: metav1.ObjectMeta{
Name: constants.NodeVersionResourceName,
},
Spec: updatev1alpha1.NodeVersionSpec{
ImageReference: "image-reference2",
ImageVersion: "image-version2",
KubernetesComponentsReference: "components-reference2",
},
},
},
@ -211,15 +239,28 @@ func TestCreateNodeImage(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
k8sClient := &stubK8sClient{createErr: tc.createErr}
err := createNodeImage(context.Background(), k8sClient, "image-reference", "image-version")
k8sClient := &fakeK8sClient{
createErr: tc.createErr,
listConfigMaps: []corev1.ConfigMap{
{
ObjectMeta: metav1.ObjectMeta{
Name: k8sComponentsReference,
CreationTimestamp: metav1.Time{Time: time.Unix(1, 0)},
},
},
},
}
if tc.existingNodeVersion != nil {
k8sClient.createdObjects = append(k8sClient.createdObjects, tc.existingNodeVersion)
}
err := createNodeVersion(context.Background(), k8sClient, "image-reference", "image-version")
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
assert.Len(k8sClient.createdObjects, 1)
assert.Equal(tc.wantNodeImage, k8sClient.createdObjects[0])
assert.Equal(tc.wantNodeVersion, k8sClient.createdObjects[0])
})
}
}
@ -237,7 +278,7 @@ func TestCreateScalingGroup(t *testing.T) {
Name: "group-name",
},
Spec: updatev1alpha1.ScalingGroupSpec{
NodeImage: constants.NodeImageResourceName,
NodeVersion: constants.NodeVersionResourceName,
GroupID: "group-id",
AutoscalerGroupName: "group-Name",
Min: 1,
@ -258,7 +299,7 @@ func TestCreateScalingGroup(t *testing.T) {
Name: "group-name",
},
Spec: updatev1alpha1.ScalingGroupSpec{
NodeImage: constants.NodeImageResourceName,
NodeVersion: constants.NodeVersionResourceName,
GroupID: "group-id",
AutoscalerGroupName: "group-Name",
Min: 1,
@ -274,7 +315,7 @@ func TestCreateScalingGroup(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
k8sClient := &stubK8sClient{createErr: tc.createErr}
k8sClient := &fakeK8sClient{createErr: tc.createErr}
newScalingGroupConfig := newScalingGroupConfig{k8sClient, "group-id", "group-Name", "group-Name", updatev1alpha1.WorkerRole}
err := createScalingGroup(context.Background(), newScalingGroupConfig)
if tc.wantErr {
@ -288,17 +329,65 @@ func TestCreateScalingGroup(t *testing.T) {
}
}
type stubK8sClient struct {
type fakeK8sClient struct {
createdObjects []client.Object
createErr error
client.Writer
listConfigMaps []corev1.ConfigMap
listErr error
getErr error
updateErr error
client.Client
}
func (s *stubK8sClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
// Create records obj as a created object. If an object with the same name was
// already recorded, an AlreadyExists error is returned instead; otherwise the
// configured createErr is returned after recording.
func (s *fakeK8sClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
	name := obj.GetName()
	for _, existing := range s.createdObjects {
		if existing.GetName() == name {
			return k8sErrors.NewAlreadyExists(schema.GroupResource{}, name)
		}
	}
	s.createdObjects = append(s.createdObjects, obj)
	return s.createErr
}
// Get copies a previously created NodeVersion with the requested name into obj.
// For any other object type, or when no matching NodeVersion was recorded, the
// configured getErr is returned.
func (s *fakeK8sClient) Get(ctx context.Context, key types.NamespacedName, obj client.Object, opts ...client.GetOption) error {
	nodeVersion, ok := obj.(*updatev1alpha1.NodeVersion)
	if !ok {
		return s.getErr
	}
	for _, created := range s.createdObjects {
		stored, ok := created.(*updatev1alpha1.NodeVersion)
		if !ok || stored == nil || stored.Name != key.Name {
			continue
		}
		nodeVersion.ObjectMeta = stored.ObjectMeta
		nodeVersion.TypeMeta = stored.TypeMeta
		nodeVersion.Spec = stored.Spec
		return nil
	}
	return s.getErr
}
// Update replaces a previously created NodeVersion that has the same name as obj.
// For any other object type, or when no matching NodeVersion was recorded, the
// configured updateErr is returned.
func (s *fakeK8sClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
	newVersion, ok := obj.(*updatev1alpha1.NodeVersion)
	if !ok {
		return s.updateErr
	}
	for i, created := range s.createdObjects {
		oldVersion, ok := created.(*updatev1alpha1.NodeVersion)
		if !ok || oldVersion == nil || oldVersion.Name != newVersion.Name {
			continue
		}
		s.createdObjects[i] = obj
		return nil
	}
	return s.updateErr
}
// List appends the fake's preconfigured ConfigMaps to list when a ConfigMapList
// is requested, then returns the configured listErr.
func (s *fakeK8sClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
	configMapList, ok := list.(*corev1.ConfigMapList)
	if ok {
		configMapList.Items = append(configMapList.Items, s.listConfigMaps...)
	}
	return s.listErr
}
type stubImageInfo struct {
imageVersion string
err error

View File

@ -134,10 +134,10 @@ func main() {
setupLog.Error(err, "Unable to deploy initial resources")
os.Exit(1)
}
if err = controllers.NewNodeImageReconciler(
if err = controllers.NewNodeVersionReconciler(
cspClient, etcdClient, mgr.GetClient(), mgr.GetScheme(),
).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "NodeImage")
setupLog.Error(err, "Unable to create controller", "controller", "NodeVersion")
os.Exit(1)
}
if err = (&controllers.AutoscalingStrategyReconciler{

View File

@ -38,18 +38,7 @@ All Constellation microservices will be bundled into and therefore updated via o
## Extending the JoinService
The CLI will use a lookup table to map the Kubernetes version from the config to URLs and hashes. Those are sent over during `constellation init` and used by the first Bootstrapper. Then, the URLs and hashes are pushed to the `k8s-components-1.23.12` ConfigMap and the Kubernetes version with a reference to the `k8s-components-1.23.12` ConfigMap is pushed to `k8s-versions`.
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: k8s-version
namespace: kube-system
data:
k8s-version: "1.23.12"
components: "k8s-components-1.23.12-sha256-8ae09b7e922a90fea7a4259fb096f73e9efa948ea2f09349618102a328c44b8b" # This references the ConfigMap below.
```
The CLI will use a lookup table to map the Kubernetes version from the config to URLs and hashes. Those are sent over during `constellation init` and used by the first Bootstrapper. Then, the URLs and hashes are pushed to the `k8s-components-1.23.12` ConfigMap and the `k8s-components-1.23.12` ConfigMap is referenced by the `NodeVersion` CR named `constellation-version`.
```yaml
apiVersion: v1
@ -63,10 +52,11 @@ data:
'[{"URL":"https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz","Hash":"sha256:b275772da4026d2161bf8a8b41ed4786754c8a93ebfb6564006d5da7f23831e5","InstallPath":"/opt/cni/bin","Extract":true},{"URL":"https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.25.0/crictl-v1.25.0-linux-amd64.tar.gz","Hash":"sha256:86ab210c007f521ac4cdcbcf0ae3fb2e10923e65f16de83e0e1db191a07f0235","InstallPath":"/run/state/bin","Extract":true},{"URL":"https://storage.googleapis.com/kubernetes-release/release/v1.23.12/bin/linux/amd64/kubelet","Hash":"sha256:2da0b93857cf352bff5d1eb42e34d398a5971b63a53d8687b45179a78540d6d6","InstallPath":"/run/state/bin/kubelet","Extract":false},{"URL":"https://storage.googleapis.com/kubernetes-release/release/v1.23.12/bin/linux/amd64/kubeadm","Hash":"sha256:9fea42b4fb5eb2da638d20710ebb791dde221e6477793d3de70134ac058c4cc7","InstallPath":"/run/state/bin/kubeadm","Extract":false},{"URL":"https://storage.googleapis.com/kubernetes-release/release/v1.23.12/bin/linux/amd64/kubectl","Hash":"sha256:f93c18751ec715b4d4437e7ece18fe91948c71be1f24ab02a2dde150f5449855","InstallPath":"/run/state/bin/kubectl","Extract":false}]'
```
The JoinService will consume the `k8s-components-1.23.12` ConfigMap in addition to the `k8s-version` ConfigMap. Currently, the `k8s-version` ConfigMap is mounted into the JoinService pod. We will change that so that the JoinService requests the ConfigMap values via the Kubernetes API. If a new node wants to join the cluster, the JoinService looks up the current Kubernetes version and all the component download URLs and hashes and sends them to the joining node.
The JoinService will look at the `k8s-components-1.23.12` ConfigMap in addition to the `NodeVersion` CR named `constellation-version`. Currently, the `k8s-version` ConfigMap is mounted into the JoinService pod. We will change that so that the JoinService requests the `kubernetesComponentsReference` from `constellation-version` and then uses this to look up the Kubernetes components.
Those components are then sent to any node requesting to join the cluster.
Additionally, each node trying to join the cluster is tracked.
The JoinService creates a JoiningNode CRD for each issued JoinTicket with the node's name and the hash of the components it was sent. This JoiningNode CRD is consumed by the node operator.
Additionally, each node trying to join the cluster is tracked as a `JoiningNode` CR.
The JoinService creates a `JoiningNode` CRD for each issued JoinTicket with the node's name and reference to the Kubernetes components ConfigMap it was sent. This `JoiningNode` CRD is consumed by the node operator.
## Extending the Bootstrapper
@ -76,20 +66,20 @@ We receive all necessary information from the CLI in the first place, since we n
To be able to even update singular components, we need to know if the set of components of a node is the desired one. To achieve that, the Bootstrapper calculates a hash of all the components' hashes.
Because of the length restriction for labels, we need to attach this information as an annotation to the node.
Annotations cannot be set during the join process (in contrast to node-labels).
Therefore, for every JoinRequest, the JoinService writes an entry to a ConfigMap.
This ConfigMap will later be consumed by the node operator.
The ConfigMap will contain a `map[string]map[string]string` in `data.joining-nodes`.
This map will map the node name to a map of annotation keys and annotation values.
Therefore, for every JoinRequest, the JoinService will create a JoiningNode CR.
This CRD will later be consumed by the node operator.
The JoiningNode CRD will contain a `componentsreference` in its spec.
```yaml
apiVersion: v1
kind: ConfigMap
apiVersion: update.edgeless.systems/v1alpha1
kind: JoiningNode
metadata:
name: joining-nodes
namespace: kube-system
data:
joining-nodes: '{"constell-worker-83df2": {"constellation.edgeless.systems/kubernetes-components-hash": "sha256:f40d4b6feb791e069158b69bf7a70e4acd43600976673ea40f649919233fa783"},
"constell-worker-jsu3l": {"constellation.edgeless.systems/kubernetes-components-hash": "sha256:f40d4b6feb791e069158b69bf7a70e4acd43600976673ea40f649919233fa783"}}'
name: leo-1645f3a5-worker000001
spec:
name: leo-1645f3a5-worker000001
iscontrolplane: false
componentsreference: k8s-components-sha256-4054c3597f2ff5c582aaaf212db56db2b14037e79148d82d95dc046f4fc6d92e
deadline: "2023-01-04T10:30:35Z"
```
## Creating an upgrade agent
@ -126,17 +116,17 @@ The CLI hands users the same mechanism to deliver the Kubernetes version to the
```patch
// NodeImageSpec defines the desired state of NodeImage.
-type NodeImageSpec struct {
+type NodeSpec struct {
+type NodeVersionSpec struct {
// ImageReference is the image to use for all nodes.
ImageReference string `json:"image,omitempty"`
// ImageVersion is the CSP independent version of the image to use for all nodes.
ImageVersion string `json:"imageVersion,omitempty"`
+ // KubernetesVersion defines the Kubernetes version for all nodes.
+ KubernetesVersion string `json:"kubernetesVersion,omitempty"`
+ // KubernetesComponentsReference is a reference to the ConfigMap containing the Kubernetes components to use for all nodes.
+ KubernetesComponentsReference string `json:"kubernetesComponentsReference,omitempty"`
}
```
Additionally, we will change the `NodeImageStatus` to `NodeStatus` (see `nodeimage_types.go`) along with the corresponding controllers.
Additionally, we will change the `NodeImageStatus` to `NodeVersionStatus` (see `nodeimage_types.go`) along with the corresponding controllers.
The Controller will need to take the following steps to update the Kubernetes version:
@ -144,7 +134,7 @@ The Controller will need to take the following steps to update the Kubernetes ve
* get the kubeadm download URL and hash from the `k8s-components-1.23.12` ConfigMap
* pass the URL and hash over a socket mounted into its container to the local update agent running on the same node
* The agent downloads the new kubeadm binary, checks its hash and executes `kubeadm upgrade plan` and `kubeadm upgrade apply v1.23.12`
* After the agent returned successfully, update the Kubernetes version to `1.23.12` and components reference to `k8s-components-1.23.12` in the `k8s-version` ConfigMap
* After the agent returned successfully, update the components reference to `k8s-components-1.23.12` in the `NodeVersion` CRD named `constellation-version`.
* Now, iterate over all nodes, and replace them if their Kubernetes version is outdated
## Extending the `constellation upgrade` command
@ -254,11 +244,9 @@ When `constellation upgrade apply` is called the CLI needs to perform the follow
1. warn the user to create a Constellation/etcd backup before updating as documented in the [official K8s update docs](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/#before-you-begin)
2. create a new `k8s-components-1.24.3` ConfigMap with the corresponding URLs and hashes from the lookup table in the CLI
3. update the measurements in the `join-config` ConfigMap
4. update the Kubernetes version and VM image in the `nodeimage` CRD
4. update the Kubernetes version and VM image in the `NodeVersion` CRD named `constellation-version`
5. update Constellation microservices
The actual update in step 2. and 3. will be handled by the node-operator inside Constellation. Step 5. will be done via client side helm deployments.
Since the service versions bundled inside a `microserviceVersion` are hidden, the CLI will print the changes taking place. We also print a warning to back up any important components when the upgrade necessitates a node replacement, i.e. on Kubernetes and VM image upgrades.
```bash