Make loader tests more precise

Until now, the loader tests did not detect when a file existed in testdata
but was missing from the actual render results. This patch fixes that problem.
It also removes various files that are no longer needed.
The testdata folder now maps 1:1 to the files that end up in a cluster.
Otto Bittner 2022-11-30 17:27:08 +01:00
parent c05d1589f8
commit a20b5461aa
21 changed files with 82 additions and 1026 deletions
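
At its core, the new approach builds a map of expected templates from the files on disk and checks it key-for-key against Helm's render output, so a testdata file without a matching render (or a render without a matching file) now fails the test. A condensed, self-contained sketch of that idea, with hypothetical paths and contents (the real helpers are buildTestdataMap and compareMaps in the diff below):

package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
)

// collectTestdata walks root and gathers every .yaml file into a
// path -> content map, mirroring what buildTestdataMap does in the diff.
func collectTestdata(root string) (map[string]string, error) {
	expected := map[string]string{}
	err := filepath.Walk(root, func(p string, _ fs.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !strings.HasSuffix(p, ".yaml") {
			return nil
		}
		data, err := os.ReadFile(p)
		if err != nil {
			return err
		}
		expected[p] = string(data)
		return nil
	})
	return expected, err
}

func main() {
	// Hypothetical render output; in the real test this comes from engine.Render.
	result := map[string]string{
		"testdata/gcp/constellation-services/cm.yaml": "kind: ConfigMap",
	}

	expected, err := collectTestdata("testdata/gcp/constellation-services")
	if err != nil {
		fmt.Println("walk error:", err)
		return
	}

	// Every file on disk must have a matching render, and vice versa.
	for k := range expected {
		if _, ok := result[k]; !ok {
			fmt.Println("file in testdata but missing from render:", k)
		}
	}
	for k := range result {
		if _, ok := expected[k]; !ok {
			fmt.Println("render without a matching testdata file:", k)
		}
	}
}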

View File

@@ -13,6 +13,7 @@ import (
 	"io/fs"
 	"os"
 	"path"
+	"path/filepath"
 	"sort"
 	"strings"
 	"testing"
@@ -121,30 +122,14 @@ func TestConstellationServices(t *testing.T) {
 			result, err := engine.Render(chart, valuesToRender)
 			require.NoError(err)
-			for k, actual := range result {
-				currentFile := path.Join("testdata", tc.config.GetProvider().String(), k)
-				expected, err := os.ReadFile(currentFile)
-				// If a file does not exist, we expect the render for that path to be empty.
-				if errors.Is(err, fs.ErrNotExist) {
-					assert.YAMLEq("", actual, fmt.Sprintf("current file: %s", currentFile))
-					continue
-				}
-				require.NoError(err)
+			testDataPath := path.Join("testdata", tc.config.GetProvider().String(), "constellation-services")
-				// testify has an issue where when multiple documents are contained in one YAML string,
-				// only the first document is parsed [1]. For this reason we split the YAML string
-				// into a slice of strings, each entry containing one document.
-				// [1] https://github.com/stretchr/testify/issues/1281
-				expectedSplit := strings.Split(string(expected), "\n---\n")
-				sort.Strings(expectedSplit)
-				actualSplit := strings.Split(actual, "\n---\n")
-				sort.Strings(actualSplit)
-				assert.Equal(len(expectedSplit), len(actualSplit))
+			// Build a map with the same structure as result: file paths -> rendered template.
+			expectedData := map[string]string{}
+			err = filepath.Walk(testDataPath, buildTestdataMap(tc.config.GetProvider().String(), expectedData, require))
+			require.NoError(err)
-				for i := range expectedSplit {
-					assert.YAMLEq(expectedSplit[i], actualSplit[i], fmt.Sprintf("current file: %s", currentFile))
-				}
-			}
+			compareMaps(expectedData, result, assert, require, t)
 		})
 	}
 }
@@ -204,34 +189,87 @@ func TestOperators(t *testing.T) {
 			result, err := engine.Render(chart, valuesToRender)
 			require.NoError(err)
-			for k, actual := range result {
-				currentFile := path.Join("testdata", tc.csp.String(), k)
-				expected, err := os.ReadFile(currentFile)
-				// If a file does not exist, we expect the render for that path to be empty.
-				if errors.Is(err, fs.ErrNotExist) {
-					assert.YAMLEq("", actual, fmt.Sprintf("current file: %s", currentFile))
-					continue
-				}
-				require.NoError(err)
+			testDataPath := path.Join("testdata", tc.csp.String(), "constellation-operators")
-				// testify has an issue where when multiple documents are contained in one YAML string,
-				// only the first document is parsed [1]. For this reason we split the YAML string
-				// into a slice of strings, each entry containing one document.
-				// [1] https://github.com/stretchr/testify/issues/1281
-				expectedSplit := strings.Split(string(expected), "\n---\n")
-				sort.Strings(expectedSplit)
-				actualSplit := strings.Split(actual, "\n---\n")
-				sort.Strings(actualSplit)
-				assert.Equal(len(expectedSplit), len(actualSplit))
+			// Build a map with the same structure as result: file paths -> rendered template.
+			expectedData := map[string]string{}
+			err = filepath.Walk(testDataPath, buildTestdataMap(tc.csp.String(), expectedData, require))
+			require.NoError(err)
-				for i := range expectedSplit {
-					assert.YAMLEq(expectedSplit[i], actualSplit[i], fmt.Sprintf("current file: %s", currentFile))
-				}
-			}
+			compareMaps(expectedData, result, assert, require, t)
 		})
 	}
 }
+
+// compareMaps ensures that both maps specify the same templates.
+func compareMaps(expectedData map[string]string, result map[string]string, assert *assert.Assertions, require *require.Assertions, t *testing.T) {
+	// This whole block exists only to produce useful error messages.
+	// It lets a developer spot the missing template from the error message alone.
+	if len(expectedData) > len(result) {
+		keys := getKeys(expectedData)
+		sort.Strings(keys)
+		t.Logf("expected these templates:\n%s", strings.Join(keys, "\n"))
+		keys = getKeys(result)
+		sort.Strings(keys)
+		t.Logf("got these templates:\n%s", strings.Join(keys, "\n"))
+		require.FailNow("missing templates in results.")
+	}
+	// Walk the map and compare each result with its expected render.
+	// Results for which the expected file is missing are errors.
+	for k, actualTemplates := range result {
+		if len(strings.TrimSpace(actualTemplates)) == 0 {
+			continue
+		}
+		// testify has an issue where, when multiple documents are contained in one YAML string,
+		// only the first document is parsed [1]. For this reason we split the YAML string
+		// into a slice of strings, each entry containing one document.
+		// [1] https://github.com/stretchr/testify/issues/1281
+		renderedTemplates, ok := expectedData[k]
+		require.True(ok, fmt.Sprintf("unexpected render in results, missing file with expected data: %s len: %d", k, len(actualTemplates)))
+		expectedSplit := strings.Split(renderedTemplates, "\n---\n")
+		sort.Strings(expectedSplit)
+		actualSplit := strings.Split(actualTemplates, "\n---\n")
+		sort.Strings(actualSplit)
+		require.Equal(len(expectedSplit), len(actualSplit))
+		for i := range expectedSplit {
+			assert.YAMLEq(expectedSplit[i], actualSplit[i], fmt.Sprintf("current file: %s", k))
+		}
+	}
+}
+
+func getKeys(input map[string]string) []string {
+	keys := []string{}
+	for k := range input {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+func buildTestdataMap(csp string, expectedData map[string]string, require *require.Assertions) func(path string, info fs.FileInfo, err error) error {
+	return func(currentPath string, _ os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !strings.HasSuffix(currentPath, ".yaml") {
+			return nil
+		}
+		_, after, _ := strings.Cut(currentPath, "testdata/"+csp+"/")
+		data, err := os.ReadFile(currentPath)
+		require.NoError(err)
+		_, ok := expectedData[after]
+		require.False(ok, "read same path twice during expected data collection.")
+		expectedData[after] = string(data)
+		return nil
+	}
+}
 
 func prepareGCPValues(values map[string]any) error {
 	joinVals, ok := values["join-service"].(map[string]any)
 	if !ok {
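
The testify limitation cited in the comments above is easy to reproduce in isolation: assert.YAMLEq only parses the first document of a multi-document YAML string (stretchr/testify#1281). A minimal self-contained sketch of the split-sort-compare workaround the test uses (package name and documents are made up):

package loader_test

import (
	"sort"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Each document is split out and compared separately; sorting makes the
// comparison independent of document order within the YAML string.
func TestMultiDocumentYAMLEq(t *testing.T) {
	expected := "kind: ConfigMap\n---\nkind: Service\n"
	actual := "kind: Service\n---\nkind: ConfigMap\n"

	expectedSplit := strings.Split(expected, "\n---\n")
	sort.Strings(expectedSplit)
	actualSplit := strings.Split(actual, "\n---\n")
	sort.Strings(actualSplit)

	require.Equal(t, len(expectedSplit), len(actualSplit))
	for i := range expectedSplit {
		assert.YAMLEq(t, expectedSplit[i], actualSplit[i])
	}
}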

View File

@@ -1,143 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: constellation-operator-controller-manager
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: constellation-operator-controller-manager
namespace: testNamespace
labels:
control-plane: controller-manager
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
control-plane: controller-manager
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
template:
metadata:
labels:
control-plane: controller-manager
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
annotations:
kubectl.kubernetes.io/default-container: manager
spec:
containers:
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=0
env:
- name: KUBERNETES_CLUSTER_DOMAIN
value: cluster.local
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.11.0
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi
- args:
- --health-probe-bind-address=:8081
- --metrics-bind-address=127.0.0.1:8080
- --leader-elect
command:
- /manager
env:
- name: KUBERNETES_CLUSTER_DOMAIN
value: cluster.local
- name: CONSTEL_CSP
value: QEMU
- name: constellation-uid
value: "42424242424242"
image: ghcr.io/edgelesssys/constellation/node-operator:v2.3.0-pre.0.20221108173951-34435e439604
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: manager
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 10m
memory: 64Mi
securityContext:
allowPrivilegeEscalation: false
volumeMounts:
- mountPath: /etc/kubernetes/pki/etcd
name: etcd-certs
- mountPath: /host/usr/lib/os-release
name: usr-lib-os-release
- mountPath: /etc/os-release
name: etc-os-release
- mountPath: /etc/azure
name: azureconfig
readOnly: true
- mountPath: /etc/gce
name: gceconf
readOnly: true
nodeSelector:
node-role.kubernetes.io/control-plane: ""
securityContext:
runAsUser: 0
serviceAccountName: constellation-operator-controller-manager
terminationGracePeriodSeconds: 10
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
volumes:
- hostPath:
path: /etc/kubernetes/pki/etcd
type: Directory
name: etcd-certs
- hostPath:
path: /usr/lib/os-release
type: File
name: usr-lib-os-release
- hostPath:
path: /etc/os-release
type: File
name: etc-os-release
- name: azureconfig
secret:
optional: true
secretName: azureconfig
- configMap:
name: gceconf
optional: true
name: gceconf

View File

@@ -1,61 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: constellation-operator-leader-election-role
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: constellation-operator-leader-election-rolebinding
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: 'constellation-operator-leader-election-role'
subjects:
- kind: ServiceAccount
name: 'constellation-operator-controller-manager'
namespace: 'testNamespace'

View File

@@ -1,23 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: constellation-operator-manager-config
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
data:
controller_manager_config.yaml: |
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
health:
healthProbeBindAddress: ":8081"
kind: ControllerManagerConfig
leaderElection:
leaderElect: true
resourceName: "38cc1645.edgeless.systems"
metrics:
bindAddress: "127.0.0.1:8080"
webhook:
port: 9443

View File

@@ -1,183 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: constellation-operator-manager-role
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- get
- apiGroups:
- apps
resources:
- deployments
verbs:
- create
- delete
- get
- list
- update
- watch
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- autoscalingstrategies
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- autoscalingstrategies/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- autoscalingstrategies/status
verbs:
- get
- patch
- update
- apiGroups:
- update.edgeless.systems
resources:
- nodeimage
verbs:
- get
- list
- watch
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/status
verbs:
- get
- patch
- update
- apiGroups:
- update.edgeless.systems
resources:
- pendingnodes
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- pendingnodes/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- pendingnodes/status
verbs:
- get
- patch
- update
- apiGroups:
- update.edgeless.systems
resources:
- scalinggroups
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- scalinggroups/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- scalinggroups/status
verbs:
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: constellation-operator-manager-rolebinding
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'constellation-operator-manager-role'
subjects:
- kind: ServiceAccount
name: 'constellation-operator-controller-manager'
namespace: 'testNamespace'

View File

@@ -1,15 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: constellation-operator-metrics-reader
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- nonResourceURLs:
- /metrics
verbs:
- get

View File

@@ -1,22 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: constellation-operator-controller-manager-metrics-service
namespace: testNamespace
labels:
control-plane: controller-manager
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
selector:
control-plane: controller-manager
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https

View File

@@ -1,42 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: constellation-operator-proxy-role
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: constellation-operator-proxy-rolebinding
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'constellation-operator-proxy-role'
subjects:
- kind: ServiceAccount
name: 'constellation-operator-controller-manager'
namespace: 'testNamespace'

View File

@@ -1,112 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: node-maintenance-operator-controller-manager
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: node-maintenance-operator-controller-manager
namespace: testNamespace
labels:
control-plane: controller-manager
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
control-plane: controller-manager
node-maintenance-operator: ""
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
template:
metadata:
labels:
control-plane: controller-manager
node-maintenance-operator: ""
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
annotations:
kubectl.kubernetes.io/default-container: manager
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- args:
- --health-probe-bind-address=:8081
- --metrics-bind-address=:8080
- --leader-elect
command:
- /manager
env:
- name: OPERATOR_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: KUBERNETES_CLUSTER_DOMAIN
value: cluster.local
image: ghcr.io/edgelesssys/constellation/node-maintenance-operator:v0.13.1-alpha1
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: manager
ports:
- containerPort: 9443
name: webhook-server
protocol: TCP
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
cpu: 200m
memory: 100Mi
requests:
cpu: 100m
memory: 20Mi
securityContext:
allowPrivilegeEscalation: false
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
priorityClassName: system-cluster-critical
securityContext:
runAsNonRoot: true
serviceAccountName: node-maintenance-operator-controller-manager
terminationGracePeriodSeconds: 10
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
volumes:
- name: cert
secret:
defaultMode: 420
secretName: webhook-server-cert

View File

@@ -1,63 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: node-maintenance-operator-leader-election-role
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: node-maintenance-operator-leader-election-rolebinding
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: 'node-maintenance-operator-leader-election-role'
subjects:
- kind: ServiceAccount
name: 'node-maintenance-operator-controller-manager'
namespace: 'testNamespace'

View File

@@ -1,131 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: node-maintenance-operator-manager-role
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods/eviction
verbs:
- create
- apiGroups:
- apps
resources:
- daemonsets
- deployments
- replicasets
- statefulsets
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- get
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances/finalizers
verbs:
- update
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances/status
verbs:
- get
- patch
- update
- apiGroups:
- oauth.openshift.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-maintenance-operator-manager-rolebinding
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'node-maintenance-operator-manager-role'
subjects:
- kind: ServiceAccount
name: 'node-maintenance-operator-controller-manager'
namespace: 'testNamespace'

View File

@@ -1,16 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: node-maintenance-operator-metrics-reader
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- nonResourceURLs:
- /metrics
verbs:
- get

View File

@@ -1,24 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: node-maintenance-operator-controller-manager-metrics-service
namespace: testNamespace
labels:
control-plane: controller-manager
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
selector:
control-plane: controller-manager
node-maintenance-operator: ""
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https

View File

@@ -1,44 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: node-maintenance-operator-proxy-role
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-maintenance-operator-proxy-rolebinding
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'node-maintenance-operator-proxy-role'
subjects:
- kind: ServiceAccount
name: 'node-maintenance-operator-controller-manager'
namespace: 'testNamespace'

View File

@@ -1,12 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: node-maintenance-operator-selfsigned-issuer
namespace: testNamespace
labels:
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
selfSigned: {}

View File

@@ -1,18 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: node-maintenance-operator-serving-cert
namespace: testNamespace
labels:
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
dnsNames:
- 'node-maintenance-operator-webhook-service.testNamespace.svc'
- 'node-maintenance-operator-webhook-service.testNamespace.svc.cluster.local'
issuerRef:
kind: Issuer
name: node-maintenance-operator-selfsigned-issuer
secretName: webhook-server-cert

View File

@@ -1,34 +0,0 @@
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: node-maintenance-operator-validating-webhook-configuration
namespace: testNamespace
annotations:
cert-manager.io/inject-ca-from: testNamespace/node-maintenance-operator-serving-cert
labels:
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: node-maintenance-operator-webhook-service
namespace: testNamespace
path: /validate-nodemaintenance-medik8s-io-v1beta1-nodemaintenance
failurePolicy: Fail
name: vnodemaintenance.kb.io
rules:
- apiGroups:
- nodemaintenance.medik8s.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- nodemaintenances
sideEffects: None
timeoutSeconds: 15

View File

@@ -1,22 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: node-maintenance-operator-webhook-service
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
selector:
control-plane: controller-manager
node-maintenance-operator: ""
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
ports:
- port: 443
protocol: TCP
targetPort: 9443

View File

@@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:cloud-controller-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: cloud-controller-manager
namespace: testNamespace

View File

@@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: cloud-controller-manager
namespace: testNamespace