join: synchronize control plane joining (#776)

* join: synchronize control plane joining
Authored by 3u13r on 2022-12-09 18:30:20 +01:00; committed by GitHub
parent 012f739c67
commit c993cd6800
34 changed files with 1166 additions and 61 deletions

View File

@@ -227,11 +227,8 @@ func (k *KubeWrapper) InitCluster(
}
// cert-manager is necessary for our operator deployments.
// They are currently only deployed on GCP & Azure. This is why we deploy cert-manager only on GCP & Azure.
if k.cloudProvider == "gcp" || k.cloudProvider == "azure" {
if err = k.helmClient.InstallCertManager(ctx, helmReleases.CertManager); err != nil {
return nil, fmt.Errorf("installing cert-manager: %w", err)
}
if err = k.helmClient.InstallCertManager(ctx, helmReleases.CertManager); err != nil {
return nil, fmt.Errorf("installing cert-manager: %w", err)
}
operatorVals, err := k.setupOperatorVals(ctx)

View File

@@ -10,8 +10,12 @@ dependencies:
tags:
- Azure
- GCP
- AWS
- QEMU
- name: constellation-operator
version: 2.3.0-pre
tags:
- Azure
- GCP
- AWS
- QEMU

View File

@@ -38,6 +38,15 @@ spec:
description: ComponentsHash is the hash of the components that were
sent to the node by the join service.
type: string
deadline:
description: Deadline is the time after which the joining node is
considered to have failed.
format: date-time
type: string
iscontrolplane:
description: IsControlPlane is true if the node is a control plane
node.
type: boolean
name:
description: Name of the node expected to join.
type: string

View File

@@ -0,0 +1,143 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: constellation-operator-controller-manager
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: constellation-operator-controller-manager
namespace: testNamespace
labels:
control-plane: controller-manager
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
control-plane: controller-manager
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
template:
metadata:
labels:
control-plane: controller-manager
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
annotations:
kubectl.kubernetes.io/default-container: manager
spec:
containers:
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=0
env:
- name: KUBERNETES_CLUSTER_DOMAIN
value: cluster.local
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi
- args:
- --health-probe-bind-address=:8081
- --metrics-bind-address=127.0.0.1:8080
- --leader-elect
command:
- /manager
env:
- name: KUBERNETES_CLUSTER_DOMAIN
value: cluster.local
- name: CONSTEL_CSP
value: QEMU
- name: constellation-uid
value: "42424242424242"
image: constellationOperatorImage
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: manager
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 10m
memory: 64Mi
securityContext:
allowPrivilegeEscalation: false
volumeMounts:
- mountPath: /etc/kubernetes/pki/etcd
name: etcd-certs
- mountPath: /host/usr/lib/os-release
name: usr-lib-os-release
- mountPath: /etc/os-release
name: etc-os-release
- mountPath: /etc/azure
name: azureconfig
readOnly: true
- mountPath: /etc/gce
name: gceconf
readOnly: true
nodeSelector:
node-role.kubernetes.io/control-plane: ""
securityContext:
runAsUser: 0
serviceAccountName: constellation-operator-controller-manager
terminationGracePeriodSeconds: 10
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
volumes:
- hostPath:
path: /etc/kubernetes/pki/etcd
type: Directory
name: etcd-certs
- hostPath:
path: /usr/lib/os-release
type: File
name: usr-lib-os-release
- hostPath:
path: /etc/os-release
type: File
name: etc-os-release
- name: azureconfig
secret:
optional: true
secretName: azureconfig
- configMap:
name: gceconf
optional: true
name: gceconf

View File

@@ -0,0 +1,61 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: constellation-operator-leader-election-role
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: constellation-operator-leader-election-rolebinding
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: 'constellation-operator-leader-election-role'
subjects:
- kind: ServiceAccount
name: 'constellation-operator-controller-manager'
namespace: 'testNamespace'

View File

@@ -0,0 +1,23 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: constellation-operator-manager-config
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
data:
controller_manager_config.yaml: |
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
health:
healthProbeBindAddress: ":8081"
kind: ControllerManagerConfig
leaderElection:
leaderElect: true
resourceName: "38cc1645.edgeless.systems"
metrics:
bindAddress: "127.0.0.1:8080"
webhook:
port: 9443

View File

@@ -0,0 +1,209 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: constellation-operator-manager-role
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- get
- apiGroups:
- apps
resources:
- deployments
verbs:
- create
- delete
- get
- list
- update
- watch
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- autoscalingstrategies
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- autoscalingstrategies/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- autoscalingstrategies/status
verbs:
- get
- patch
- update
- apiGroups:
- update.edgeless.systems
resources:
- joiningnodes
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- joiningnodes/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- joiningnodes/status
verbs:
- get
- patch
- update
- apiGroups:
- update.edgeless.systems
resources:
- nodeimage
verbs:
- get
- list
- watch
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/status
verbs:
- get
- patch
- update
- apiGroups:
- update.edgeless.systems
resources:
- pendingnodes
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- pendingnodes/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- pendingnodes/status
verbs:
- get
- patch
- update
- apiGroups:
- update.edgeless.systems
resources:
- scalinggroups
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- scalinggroups/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- scalinggroups/status
verbs:
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: constellation-operator-manager-rolebinding
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'constellation-operator-manager-role'
subjects:
- kind: ServiceAccount
name: 'constellation-operator-controller-manager'
namespace: 'testNamespace'

View File

@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: constellation-operator-metrics-reader
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- nonResourceURLs:
- /metrics
verbs:
- get

View File

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
name: constellation-operator-controller-manager-metrics-service
namespace: testNamespace
labels:
control-plane: controller-manager
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
selector:
control-plane: controller-manager
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https

View File

@@ -0,0 +1,42 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: constellation-operator-proxy-role
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: constellation-operator-proxy-rolebinding
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'constellation-operator-proxy-role'
subjects:
- kind: ServiceAccount
name: 'constellation-operator-controller-manager'
namespace: 'testNamespace'

View File

@@ -0,0 +1,112 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: node-maintenance-operator-controller-manager
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: node-maintenance-operator-controller-manager
namespace: testNamespace
labels:
control-plane: controller-manager
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
control-plane: controller-manager
node-maintenance-operator: ""
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
template:
metadata:
labels:
control-plane: controller-manager
node-maintenance-operator: ""
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
annotations:
kubectl.kubernetes.io/default-container: manager
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- args:
- --health-probe-bind-address=:8081
- --metrics-bind-address=:8080
- --leader-elect
command:
- /manager
env:
- name: OPERATOR_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: KUBERNETES_CLUSTER_DOMAIN
value: cluster.local
image: nodeMaintenanceOperatorImage
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: manager
ports:
- containerPort: 9443
name: webhook-server
protocol: TCP
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
cpu: 200m
memory: 100Mi
requests:
cpu: 100m
memory: 20Mi
securityContext:
allowPrivilegeEscalation: false
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
priorityClassName: system-cluster-critical
securityContext:
runAsNonRoot: true
serviceAccountName: node-maintenance-operator-controller-manager
terminationGracePeriodSeconds: 10
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
volumes:
- name: cert
secret:
defaultMode: 420
secretName: webhook-server-cert

View File

@@ -0,0 +1,63 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: node-maintenance-operator-leader-election-role
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: node-maintenance-operator-leader-election-rolebinding
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: 'node-maintenance-operator-leader-election-role'
subjects:
- kind: ServiceAccount
name: 'node-maintenance-operator-controller-manager'
namespace: 'testNamespace'

View File

@@ -0,0 +1,131 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: node-maintenance-operator-manager-role
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods/eviction
verbs:
- create
- apiGroups:
- apps
resources:
- daemonsets
- deployments
- replicasets
- statefulsets
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- get
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances/finalizers
verbs:
- update
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances/status
verbs:
- get
- patch
- update
- apiGroups:
- oauth.openshift.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-maintenance-operator-manager-rolebinding
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'node-maintenance-operator-manager-role'
subjects:
- kind: ServiceAccount
name: 'node-maintenance-operator-controller-manager'
namespace: 'testNamespace'

View File

@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: node-maintenance-operator-metrics-reader
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- nonResourceURLs:
- /metrics
verbs:
- get

View File

@@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
name: node-maintenance-operator-controller-manager-metrics-service
namespace: testNamespace
labels:
control-plane: controller-manager
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
selector:
control-plane: controller-manager
node-maintenance-operator: ""
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https

View File

@@ -0,0 +1,44 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: node-maintenance-operator-proxy-role
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-maintenance-operator-proxy-rolebinding
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'node-maintenance-operator-proxy-role'
subjects:
- kind: ServiceAccount
name: 'node-maintenance-operator-controller-manager'
namespace: 'testNamespace'

View File

@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: node-maintenance-operator-selfsigned-issuer
namespace: testNamespace
labels:
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
selfSigned: {}

View File

@@ -0,0 +1,18 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: node-maintenance-operator-serving-cert
namespace: testNamespace
labels:
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
dnsNames:
- 'node-maintenance-operator-webhook-service.testNamespace.svc'
- 'node-maintenance-operator-webhook-service.testNamespace.svc.cluster.local'
issuerRef:
kind: Issuer
name: node-maintenance-operator-selfsigned-issuer
secretName: webhook-server-cert

View File

@@ -0,0 +1,34 @@
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: node-maintenance-operator-validating-webhook-configuration
namespace: testNamespace
annotations:
cert-manager.io/inject-ca-from: testNamespace/node-maintenance-operator-serving-cert
labels:
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: node-maintenance-operator-webhook-service
namespace: testNamespace
path: /validate-nodemaintenance-medik8s-io-v1beta1-nodemaintenance
failurePolicy: Fail
name: vnodemaintenance.kb.io
rules:
- apiGroups:
- nodemaintenance.medik8s.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- nodemaintenances
sideEffects: None
timeoutSeconds: 15

View File

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
name: node-maintenance-operator-webhook-service
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
selector:
control-plane: controller-manager
node-maintenance-operator: ""
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
ports:
- port: 443
protocol: TCP
targetPort: 9443

View File

@@ -10,6 +10,7 @@ import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/versions"
@@ -81,22 +82,46 @@ func (c *Client) CreateConfigMap(ctx context.Context, configMap corev1.ConfigMap
}
// AddNodeToJoiningNodes adds the provided node as a joining node CRD.
func (c *Client) AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string) error {
joiningNodeResource := schema.GroupVersionResource{Group: "update.edgeless.systems", Version: "v1alpha1", Resource: "joiningnodes"}
func (c *Client) AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string, isControlPlane bool) error {
joiningNode := &unstructured.Unstructured{}
objectMetadataName := nodeName
deadline := metav1.NewTime(time.Now().Add(48 * time.Hour))
if isControlPlane {
objectMetadataName = "control-plane"
deadline = metav1.NewTime(time.Now().Add(10 * time.Minute))
}
joiningNode.SetUnstructuredContent(map[string]any{
"apiVersion": "update.edgeless.systems/v1alpha1",
"kind": "JoiningNode",
"metadata": map[string]any{
"name": nodeName,
"name": objectMetadataName,
},
"spec": map[string]any{
"name": nodeName,
"componentshash": componentsHash,
"iscontrolplane": isControlPlane,
"deadline": deadline,
},
})
if isControlPlane {
return c.addControlPlaneToJoiningNodes(ctx, joiningNode)
}
return c.addWorkerToJoiningNodes(ctx, joiningNode)
}
func (c *Client) addControlPlaneToJoiningNodes(ctx context.Context, joiningNode *unstructured.Unstructured) error {
joiningNodeResource := schema.GroupVersionResource{Group: "update.edgeless.systems", Version: "v1alpha1", Resource: "joiningnodes"}
_, err := c.dynClient.Resource(joiningNodeResource).Create(ctx, joiningNode, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create joining control-plane node, maybe another node is already joining: %w", err)
}
return nil
}
func (c *Client) addWorkerToJoiningNodes(ctx context.Context, joiningNode *unstructured.Unstructured) error {
joiningNodeResource := schema.GroupVersionResource{Group: "update.edgeless.systems", Version: "v1alpha1", Resource: "joiningnodes"}
_, err := c.dynClient.Resource(joiningNodeResource).Apply(ctx, joiningNode.GetName(), joiningNode, metav1.ApplyOptions{FieldManager: "join-service"})
if err != nil {
return fmt.Errorf("failed to create joining node: %w", err)

View File

@@ -178,7 +178,7 @@ func (s *Server) IssueJoinTicket(ctx context.Context, req *joinproto.IssueJoinTi
return nil, status.Errorf(codes.Internal, "unable to get node name from CSR: %s", err)
}
if err := s.kubeClient.AddNodeToJoiningNodes(ctx, nodeName, components.GetHash()); err != nil {
if err := s.kubeClient.AddNodeToJoiningNodes(ctx, nodeName, components.GetHash(), req.IsControlPlane); err != nil {
return nil, status.Errorf(codes.Internal, "unable to add node to joining nodes: %s", err)
}
@@ -310,6 +310,6 @@ type certificateAuthority interface {
type kubeClient interface {
GetComponents(ctx context.Context, configMapName string) (versions.ComponentVersions, error)
CreateConfigMap(ctx context.Context, configMap corev1.ConfigMap) error
AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string) error
AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string, isControlPlane bool) error
AddReferenceToK8sVersionConfigMap(ctx context.Context, k8sVersionsConfigMapName string, componentsConfigMapName string) error
}

View File

@@ -346,7 +346,7 @@ func (s *stubKubeClient) AddReferenceToK8sVersionConfigMap(ctx context.Context,
return s.addReferenceToK8sVersionConfigMapErr
}
func (s *stubKubeClient) AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string) error {
func (s *stubKubeClient) AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string, isControlPlane bool) error {
s.joiningNodeName = nodeName
s.componentsHash = componentsHash
return s.addNodeToJoiningNodesErr

View File

@@ -18,6 +18,7 @@ testbin/*
# We hold the charts in the cli/internal/helm directory
chart/
bundle/
!vendor/**/zz_generated.*

View File

@@ -16,6 +16,10 @@ type JoiningNodeSpec struct {
Name string `json:"name,omitempty"`
// ComponentsHash is the hash of the components that were sent to the node by the join service.
ComponentsHash string `json:"componentshash,omitempty"`
// IsControlPlane is true if the node is a control plane node.
IsControlPlane bool `json:"iscontrolplane,omitempty"`
// Deadline is the time after which the joining node is considered to have failed.
Deadline *metav1.Time `json:"deadline,omitempty"`
}
// JoiningNodeStatus defines the observed state of JoiningNode.
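
For reference, a typed control-plane JoiningNode using the two new spec fields could look as follows. This is an illustrative sketch, not code from this commit; the 10-minute deadline mirrors the value the join service sets for control-plane nodes earlier in this diff.

```go
// Sketch (illustrative, not part of this commit): a control-plane JoiningNode
// built with the new IsControlPlane and Deadline fields.
package sketch

import (
	"time"

	updatev1alpha1 "github.com/edgelesssys/constellation/operators/constellation-node-operator/v2/api/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func newControlPlaneJoiningNode(nodeName, componentsHash string) *updatev1alpha1.JoiningNode {
	deadline := metav1.NewTime(time.Now().Add(10 * time.Minute))
	return &updatev1alpha1.JoiningNode{
		TypeMeta: metav1.TypeMeta{APIVersion: "update.edgeless.systems/v1alpha1", Kind: "JoiningNode"},
		// All joining control planes share one object name, matching the join service's behavior.
		ObjectMeta: metav1.ObjectMeta{Name: "control-plane"},
		Spec: updatev1alpha1.JoiningNodeSpec{
			Name:           nodeName,
			ComponentsHash: componentsHash,
			IsControlPlane: true,
			Deadline:       &deadline,
		},
	}
}
```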

View File

@@ -112,7 +112,7 @@ func (in *JoiningNode) DeepCopyInto(out *JoiningNode) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
}
@@ -169,6 +169,10 @@ func (in *JoiningNodeList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JoiningNodeSpec) DeepCopyInto(out *JoiningNodeSpec) {
*out = *in
if in.Deadline != nil {
in, out := &in.Deadline, &out.Deadline
*out = (*in).DeepCopy()
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JoiningNodeSpec.

View File

@@ -6,7 +6,7 @@ LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/
LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/
LABEL operators.operatorframework.io.bundle.package.v1=node-operator
LABEL operators.operatorframework.io.bundle.channels.v1=alpha
LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.22.1
LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.25.3
LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1
LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3

View File

@@ -40,6 +40,15 @@ spec:
description: ComponentsHash is the hash of the components that were
sent to the node by the join service.
type: string
deadline:
description: Deadline is the time after which the joining node is
considered to have failed.
format: date-time
type: string
iscontrolplane:
description: IsControlPlane is true if the node is a control plane
node.
type: boolean
name:
description: Name of the node expected to join.
type: string

View File

@@ -11,22 +11,22 @@ spec:
customresourcedefinitions:
owned:
- description: AutoscalingStrategy is the Schema for the autoscalingstrategies
API
API.
displayName: Autoscaling Strategy
kind: AutoscalingStrategy
name: autoscalingstrategies.update.edgeless.systems
version: v1alpha1
- description: NodeImage is the Schema for the nodeimages API
- description: NodeImage is the Schema for the nodeimages API.
displayName: Node Image
kind: NodeImage
name: nodeimages.update.edgeless.systems
version: v1alpha1
- description: PendingNode is the Schema for the pendingnodes API
- description: PendingNode is the Schema for the pendingnodes API.
displayName: Pending Node
kind: PendingNode
name: pendingnodes.update.edgeless.systems
version: v1alpha1
- description: ScalingGroup is the Schema for the scalinggroups API
- description: ScalingGroup is the Schema for the scalinggroups API.
displayName: Scaling Group
kind: ScalingGroup
name: scalinggroups.update.edgeless.systems

View File

@@ -8,12 +8,15 @@ package controllers
import (
"context"
"time"
updatev1alpha1 "github.com/edgelesssys/constellation/operators/constellation-node-operator/v2/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"k8s.io/utils/clock"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
@@ -33,6 +36,7 @@ const (
type JoiningNodesReconciler struct {
client.Client
Scheme *runtime.Scheme
clock.Clock
}
// NewJoiningNodesReconciler creates a new JoiningNodesReconciler.
@@ -40,6 +44,7 @@ func NewJoiningNodesReconciler(client client.Client, scheme *runtime.Scheme) *Jo
return &JoiningNodesReconciler{
Client: client,
Scheme: scheme,
Clock: clock.RealClock{},
}
}
@@ -54,14 +59,16 @@ func (r *JoiningNodesReconciler) Reconcile(ctx context.Context, req ctrl.Request
var joiningNode updatev1alpha1.JoiningNode
if err := r.Get(ctx, req.NamespacedName, &joiningNode); err != nil {
logr.Error(err, "unable to fetch JoiningNodes")
if !errors.IsNotFound(err) {
logr.Error(err, "Unable to fetch JoiningNode")
}
return ctrl.Result{}, client.IgnoreNotFound(err)
}
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
var node corev1.Node
if err := r.Get(ctx, types.NamespacedName{Name: joiningNode.Spec.Name}, &node); err != nil {
logr.Error(err, "unable to fetch Node")
logr.Info("unable to fetch Node", "err", err)
return err
}
@@ -73,23 +80,35 @@ func (r *JoiningNodesReconciler) Reconcile(ctx context.Context, req ctrl.Request
return r.Update(ctx, &node)
})
if err != nil {
logr.Error(err, "unable to update Node")
return ctrl.Result{}, client.IgnoreNotFound(err)
// check if the deadline has been reached
// requeue if not
if joiningNode.Spec.Deadline == nil || r.Now().Before(joiningNode.Spec.Deadline.Time) {
var requeueAfter time.Duration
if joiningNode.Spec.Deadline == nil {
requeueAfter = defaultCheckInterval
} else {
requeueAfter = joiningNode.Spec.Deadline.Time.Sub(r.Now())
}
return ctrl.Result{
RequeueAfter: requeueAfter,
}, nil
}
}
// if the joining node is too old or the annotation succeeded, delete it.
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
if err := r.Delete(ctx, &joiningNode); err != nil {
logr.Error(err, "unable to delete JoiningNode")
return err
if err := r.Get(ctx, req.NamespacedName, &joiningNode); err != nil {
return client.IgnoreNotFound(err)
}
return nil
return client.IgnoreNotFound(r.Delete(ctx, &joiningNode))
})
if err != nil {
logr.Error(err, "unable to delete JoiningNode")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
return ctrl.Result{}, err
}
// SetupWithManager sets up the controller with the Manager.
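
The requeue logic above boils down to: no deadline set → poll again after defaultCheckInterval; deadline in the future → requeue exactly when it expires; deadline passed (or the node annotated successfully) → delete the JoiningNode. A small stand-alone sketch of that decision, not taken from the commit; defaultCheckInterval stands in for the controller's constant of the same name.

```go
// Sketch (not part of this commit): the requeue decision applied when the
// matching Node cannot be annotated yet.
package sketch

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// requeueAfter returns how long to wait before reconciling again and whether a
// requeue is wanted at all. A nil deadline means the JoiningNode predates the
// new field, so the controller polls at a fixed interval; once the deadline has
// passed, the caller deletes the JoiningNode instead of requeueing.
func requeueAfter(deadline *metav1.Time, now time.Time, defaultCheckInterval time.Duration) (time.Duration, bool) {
	if deadline == nil {
		return defaultCheckInterval, true
	}
	if now.Before(deadline.Time) {
		return deadline.Time.Sub(now), true
	}
	return 0, false
}
```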

View File

@@ -25,8 +25,10 @@ var _ = Describe("JoiningNode controller", func() {
const (
nodeName1 = "node-name-1"
nodeName2 = "node-name-2"
nodeName3 = "node-name-3"
componentsHash1 = "test-hash-1"
componentsHash2 = "test-hash-2"
componentsHash3 = "test-hash-3"
timeout = time.Second * 20
duration = time.Second * 2
@@ -135,6 +137,40 @@ var _ = Describe("JoiningNode controller", func() {
return createdNode.Annotations[NodeKubernetesComponentsHashAnnotationKey]
}, timeout, interval).Should(Equal(componentsHash2))
By("deleting the joining node resource")
Eventually(func() error {
return k8sClient.Get(ctx, types.NamespacedName{Name: joiningNode.Name}, createdJoiningNode)
}, timeout, interval).ShouldNot(Succeed())
})
It("Should clean up the joining node resource after the deadline is reached", func() {
ctx := context.Background()
By("creating a joining node resource")
joiningNode := &updatev1alpha1.JoiningNode{
TypeMeta: metav1.TypeMeta{
APIVersion: "update.edgeless.systems/v1alpha1",
Kind: "JoiningNode",
},
ObjectMeta: metav1.ObjectMeta{
Name: nodeName3,
},
Spec: updatev1alpha1.JoiningNodeSpec{
Name: nodeName3,
ComponentsHash: componentsHash3,
// create without deadline first
},
}
Expect(k8sClient.Create(ctx, joiningNode)).Should(Succeed())
createdJoiningNode := &updatev1alpha1.JoiningNode{}
Eventually(func() error {
return k8sClient.Get(ctx, types.NamespacedName{Name: joiningNode.Name}, createdJoiningNode)
}, timeout, interval).Should(Succeed())
Expect(createdJoiningNode.Spec.Name).Should(Equal(nodeName3))
Expect(createdJoiningNode.Spec.ComponentsHash).Should(Equal(componentsHash3))
By("setting the deadline to the past")
createdJoiningNode.Spec.Deadline = &metav1.Time{Time: fakes.clock.Now().Add(-time.Second)}
Expect(k8sClient.Update(ctx, createdJoiningNode)).Should(Succeed())
By("deleting the joining node resource")
Eventually(func() error {
return k8sClient.Get(ctx, types.NamespacedName{Name: joiningNode.Name}, createdJoiningNode)

View File

@@ -96,6 +96,7 @@ var _ = BeforeSuite(func() {
err = (&JoiningNodesReconciler{
Client: k8sManager.GetClient(),
Scheme: k8sManager.GetScheme(),
Clock: fakes.clock,
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())

View File

@@ -75,6 +75,7 @@ func main() {
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
// Create CSP client
var cspClient cspAPI
var clientErr error
csp := strings.ToLower(os.Getenv(constellationCSP))
@@ -98,8 +99,7 @@ func main() {
os.Exit(1)
}
default:
setupLog.Info("Unknown CSP", "csp", csp)
os.Exit(1)
setupLog.Info("CSP does not support upgrades", "csp", csp)
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
@@ -126,44 +126,49 @@ func main() {
os.Exit(1)
}
defer etcdClient.Close()
imageInfo := deploy.NewImageInfo()
if err := deploy.InitialResources(context.Background(), k8sClient, imageInfo, cspClient, os.Getenv(constellationUID)); err != nil {
setupLog.Error(err, "Unable to deploy initial resources")
os.Exit(1)
// Create Controllers
if csp == "azure" || csp == "gcp" {
imageInfo := deploy.NewImageInfo()
if err := deploy.InitialResources(context.Background(), k8sClient, imageInfo, cspClient, os.Getenv(constellationUID)); err != nil {
setupLog.Error(err, "Unable to deploy initial resources")
os.Exit(1)
}
if err = controllers.NewNodeImageReconciler(
cspClient, etcdClient, mgr.GetClient(), mgr.GetScheme(),
).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "NodeImage")
os.Exit(1)
}
if err = (&controllers.AutoscalingStrategyReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "AutoscalingStrategy")
os.Exit(1)
}
if err = controllers.NewScalingGroupReconciler(
cspClient, mgr.GetClient(), mgr.GetScheme(),
).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "ScalingGroup")
os.Exit(1)
}
if err = controllers.NewPendingNodeReconciler(
cspClient, mgr.GetClient(), mgr.GetScheme(),
).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "PendingNode")
os.Exit(1)
}
}
if err = controllers.NewNodeImageReconciler(
cspClient, etcdClient, mgr.GetClient(), mgr.GetScheme(),
if err = controllers.NewJoiningNodesReconciler(
mgr.GetClient(),
mgr.GetScheme(),
).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "NodeImage")
os.Exit(1)
}
if err = (&controllers.AutoscalingStrategyReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "AutoscalingStrategy")
os.Exit(1)
}
if err = (&controllers.JoiningNodesReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "JoiningNode")
os.Exit(1)
}
if err = controllers.NewScalingGroupReconciler(
cspClient, mgr.GetClient(), mgr.GetScheme(),
).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "ScalingGroup")
os.Exit(1)
}
if err = controllers.NewPendingNodeReconciler(
cspClient, mgr.GetClient(), mgr.GetScheme(),
).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "PendingNode")
os.Exit(1)
}
//+kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {