loader_test now compares all documents in one file

Previously only the first document was compared due to
an issue in testify (stretchr/testify#1281).
Also update testdata to match the adjusted expectations.
Otto Bittner 2022-11-25 14:56:30 +01:00
parent c71fd89e80
commit 18fe34c58b
12 changed files with 151 additions and 93 deletions
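
For context: testify's YAMLEq compares its two arguments by unmarshalling each string once, and YAML unmarshalling stops after the first document, so everything after the first "---" separator was silently ignored. A minimal sketch of the underlying behavior, assuming gopkg.in/yaml.v3 (the decoder testify's assertions rely on) is available:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var a, b interface{}
	// yaml.Unmarshal decodes only the first document of a multi-document
	// string, so these two inputs look identical after unmarshalling even
	// though their second documents differ.
	_ = yaml.Unmarshal([]byte("a: 1\n---\nb: 2"), &a)
	_ = yaml.Unmarshal([]byte("a: 1\n---\nc: 3"), &b)
	fmt.Println(a, b) // map[a:1] map[a:1]
}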

View file

@@ -10,6 +10,6 @@ data:
   ResourceGroup: {{ .Values.Azure.resourceGroup | b64enc }}
   SubscriptionID: {{ .Values.Azure.subscriptionID | b64enc }}
   TenantID: {{ .Values.Azure.tenantID | b64enc }}
-  # b64encode("vmss")
+  {{/* b64encode("vmss") */}}
   VMType: dm1zcw==
 {{- end -}}
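
The change above swaps a YAML "#" comment for a Helm template comment. That distinction is what this commit relies on: a "#" line is literal text and survives rendering, so it would now be part of the compared documents, whereas "{{/* ... */}}" is stripped by the template engine; this is also why the same comment disappears from the testdata below. A small sketch of the rendering behavior with Go's text/template, which Helm's engine builds on:

package main

import (
	"os"
	"text/template"
)

func main() {
	// The "#" line is rendered verbatim; the template comment is removed,
	// leaving only its trailing newline behind.
	const tpl = "# b64encode(\"vmss\")\n{{/* b64encode(\"vmss\") */}}\nVMType: dm1zcw==\n"
	t := template.Must(template.New("secret").Parse(tpl))
	_ = t.Execute(os.Stdout, nil)
	// Output:
	// # b64encode("vmss")
	//
	// VMType: dm1zcw==
}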

View file

@@ -4,10 +4,10 @@ metadata:
   name: join-config
   namespace: {{ .Release.Namespace }}
 data:
-  # mustToJson is required so the json-strings passed from go are of type string in the rendered yaml.
+  {{/* mustToJson is required so the json-strings passed from go are of type string in the rendered yaml. */}}
   measurements: {{ .Values.measurements | mustToJson }}
 {{- if eq .Values.csp "Azure" }}
-  # ConfigMap.data is of type map[string]string. quote will not quote a quoted string.
+  {{/* ConfigMap.data is of type map[string]string. quote will not quote a quoted string. */}}
   enforceIdKeyDigest: {{ .Values.enforceIdKeyDigest | quote }}
   idkeydigest: {{ .Values.idkeydigest | quote }}
 {{- end }}

View file

@@ -13,6 +13,8 @@ import (
 	"io/fs"
 	"os"
 	"path"
+	"sort"
+	"strings"
 	"testing"
 
 	"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
@@ -117,17 +119,29 @@ func TestConstellationServices(t *testing.T) {
 			result, err := engine.Render(chart, valuesToRender)
 			require.NoError(err)
 
-			for k, v := range result {
+			for k, actual := range result {
 				currentFile := path.Join("testdata", tc.config.GetProvider().String(), k)
-				content, err := os.ReadFile(currentFile)
+				expected, err := os.ReadFile(currentFile)
 
 				// If a file does not exist, we expect the render for that path to be empty.
 				if errors.Is(err, fs.ErrNotExist) {
-					assert.YAMLEq("", v, fmt.Sprintf("current file: %s", currentFile))
+					assert.YAMLEq("", actual, fmt.Sprintf("current file: %s", currentFile))
 					continue
 				}
-				assert.NoError(err)
-				assert.YAMLEq(string(content), v, fmt.Sprintf("current file: %s", currentFile))
+				require.NoError(err)
+
+				// testify has an issue where when multiple documents are contained in one YAML string,
+				// only the first document is parsed [1]. For this reason we split the YAML string
+				// into a slice of strings, each entry containing one document.
+				// [1] https://github.com/stretchr/testify/issues/1281
+				expectedSplit := strings.Split(string(expected), "\n---\n")
+				sort.Strings(expectedSplit)
+				actualSplit := strings.Split(actual, "\n---\n")
+				sort.Strings(actualSplit)
+				assert.Equal(len(expectedSplit), len(actualSplit))
+				for i := range expectedSplit {
+					assert.YAMLEq(expectedSplit[i], actualSplit[i], fmt.Sprintf("current file: %s", currentFile))
+				}
 			}
 		})
 	}
@@ -180,17 +194,29 @@ func TestOperators(t *testing.T) {
 			result, err := engine.Render(chart, valuesToRender)
 			require.NoError(err)
 
-			for k, v := range result {
+			for k, actual := range result {
 				currentFile := path.Join("testdata", tc.csp.String(), k)
-				content, err := os.ReadFile(currentFile)
+				expected, err := os.ReadFile(currentFile)
 
 				// If a file does not exist, we expect the render for that path to be empty.
 				if errors.Is(err, fs.ErrNotExist) {
-					assert.YAMLEq("", v, fmt.Sprintf("current file: %s", currentFile))
+					assert.YAMLEq("", actual, fmt.Sprintf("current file: %s", currentFile))
 					continue
 				}
-				assert.NoError(err)
-				assert.YAMLEq(string(content), v, fmt.Sprintf("current file: %s", currentFile))
+				require.NoError(err)
+
+				// testify has an issue where when multiple documents are contained in one YAML string,
+				// only the first document is parsed [1]. For this reason we split the YAML string
+				// into a slice of strings, each entry containing one document.
+				// [1] https://github.com/stretchr/testify/issues/1281
+				expectedSplit := strings.Split(string(expected), "\n---\n")
+				sort.Strings(expectedSplit)
+				actualSplit := strings.Split(actual, "\n---\n")
+				sort.Strings(actualSplit)
+				assert.Equal(len(expectedSplit), len(actualSplit))
+				for i := range expectedSplit {
+					assert.YAMLEq(expectedSplit[i], actualSplit[i], fmt.Sprintf("current file: %s", currentFile))
+				}
 			}
 		})
 	}
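
Taken together, the comparison logic added to both tests amounts to: split each render into its documents, sort them, and compare pairwise with YAMLEq. A self-contained sketch of that flow (splitYAMLDocs is a made-up helper name; the real tests inline these steps):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// splitYAMLDocs splits a multi-document YAML string into one entry per
// document and sorts the result, so two renders can be compared document
// by document regardless of ordering.
func splitYAMLDocs(multiDoc string) []string {
	docs := strings.Split(multiDoc, "\n---\n")
	sort.Strings(docs)
	return docs
}

func main() {
	expected := splitYAMLDocs("a: 1\n---\nb: 2")
	actual := splitYAMLDocs("b: 2\n---\na: 1")
	fmt.Println(len(expected) == len(actual)) // true
	for i := range expected {
		// The tests compare each pair with assert.YAMLEq, which now sees
		// exactly one document per call.
		fmt.Println(expected[i] == actual[i]) // true, true
	}
}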

View file

@@ -9,5 +9,4 @@ data:
   ResourceGroup: cmVzb3VyY2VHcm91cA==
   SubscriptionID: c3Vic2NyaXB0aW9uSUQ=
   TenantID: VGVuYW50SUQ=
-  # b64encode("vmss")
   VMType: dm1zcw==

View file

@@ -44,7 +44,7 @@ spec:
           operator: Exists
         - effect: NoSchedule
           key: node.kubernetes.io/not-ready
           operator: Exists
       containers:
         - name: csi-provisioner
           image: "mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.2.0"
@@ -188,7 +188,7 @@ spec:
             - name: CSI_ENDPOINT
               value: unix:///csi/csi.sock
             - name: AZURE_GO_SDK_LOG_LEVEL
               value:
           imagePullPolicy: IfNotPresent
           volumeMounts:
             - mountPath: /csi
@@ -213,4 +213,4 @@ spec:
             path: /etc/ssl/certs
         - name: ssl-pki
           hostPath:
             path: /etc/pki/ca-trust/extracted

View file

@@ -41,7 +41,7 @@ spec:
             - virtual-kubelet
       priorityClassName: system-node-critical
       tolerations:
         - operator: Exists
       containers:
         - name: liveness-probe
           volumeMounts:
@@ -134,7 +134,7 @@ spec:
                   apiVersion: v1
                   fieldPath: spec.nodeName
             - name: AZURE_GO_SDK_LOG_LEVEL
               value:
           imagePullPolicy: IfNotPresent
           securityContext:
             privileged: true

View file

@@ -36,7 +36,30 @@ rules:
   - apiGroups: ["coordination.k8s.io"]
     resources: ["leases"]
     verbs: ["get", "watch", "list", "delete", "update", "create", "patch"]
 ---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: azuredisk-csi-provisioner-binding
+  labels:
+    app.kubernetes.io/instance: "testRelease"
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/name: "azuredisk-csi-driver"
+    app.kubernetes.io/version: "v1.0.1"
+    helm.sh/chart: "azuredisk-csi-driver-v1.0.1"
+subjects:
+  - kind: ServiceAccount
+    name: csi-azuredisk-controller-sa
+    namespace: testNamespace
+roleRef:
+  kind: ClusterRole
+  name: azuredisk-external-provisioner-role
+  apiGroup: rbac.authorization.k8s.io
+---
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -67,6 +90,28 @@ rules:
     resources: ["leases"]
     verbs: ["get", "watch", "list", "delete", "update", "create", "patch"]
 ---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: azuredisk-csi-attacher-binding
+  labels:
+    app.kubernetes.io/instance: "testRelease"
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/name: "azuredisk-csi-driver"
+    app.kubernetes.io/version: "v1.0.1"
+    helm.sh/chart: "azuredisk-csi-driver-v1.0.1"
+subjects:
+  - kind: ServiceAccount
+    name: csi-azuredisk-controller-sa
+    namespace: testNamespace
+roleRef:
+  kind: ClusterRole
+  name: azuredisk-external-attacher-role
+  apiGroup: rbac.authorization.k8s.io
+---
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -97,6 +142,27 @@ rules:
     resources: ["leases"]
     verbs: ["get", "watch", "list", "delete", "update", "create", "patch"]
 ---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: azuredisk-csi-snapshotter-binding
+  labels:
+    app.kubernetes.io/instance: "testRelease"
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/name: "azuredisk-csi-driver"
+    app.kubernetes.io/version: "v1.0.1"
+    helm.sh/chart: "azuredisk-csi-driver-v1.0.1"
+subjects:
+  - kind: ServiceAccount
+    name: csi-azuredisk-controller-sa
+    namespace: testNamespace
+roleRef:
+  kind: ClusterRole
+  name: azuredisk-external-snapshotter-role
+  apiGroup: rbac.authorization.k8s.io
+---
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -127,72 +193,6 @@ rules:
     resources: ["pods"]
     verbs: ["get", "list", "watch"]
 ---
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: azuredisk-csi-driver-controller-secret-role
-rules:
-  - apiGroups: [""]
-    resources: ["secrets"]
-    verbs: ["get", "list"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: azuredisk-csi-provisioner-binding
-  labels:
-    app.kubernetes.io/instance: "testRelease"
-    app.kubernetes.io/managed-by: "Helm"
-    app.kubernetes.io/name: "azuredisk-csi-driver"
-    app.kubernetes.io/version: "v1.0.1"
-    helm.sh/chart: "azuredisk-csi-driver-v1.0.1"
-subjects:
-  - kind: ServiceAccount
-    name: csi-azuredisk-controller-sa
-    namespace: testNamespace
-roleRef:
-  kind: ClusterRole
-  name: azuredisk-external-provisioner-role
-  apiGroup: rbac.authorization.k8s.io
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: azuredisk-csi-attacher-binding
-  labels:
-    app.kubernetes.io/instance: "testRelease"
-    app.kubernetes.io/managed-by: "Helm"
-    app.kubernetes.io/name: "azuredisk-csi-driver"
-    app.kubernetes.io/version: "v1.0.1"
-    helm.sh/chart: "azuredisk-csi-driver-v1.0.1"
-subjects:
-  - kind: ServiceAccount
-    name: csi-azuredisk-controller-sa
-    namespace: testNamespace
-roleRef:
-  kind: ClusterRole
-  name: azuredisk-external-attacher-role
-  apiGroup: rbac.authorization.k8s.io
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: azuredisk-csi-snapshotter-binding
-  labels:
-    app.kubernetes.io/instance: "testRelease"
-    app.kubernetes.io/managed-by: "Helm"
-    app.kubernetes.io/name: "azuredisk-csi-driver"
-    app.kubernetes.io/version: "v1.0.1"
-    helm.sh/chart: "azuredisk-csi-driver-v1.0.1"
-subjects:
-  - kind: ServiceAccount
-    name: csi-azuredisk-controller-sa
-    namespace: testNamespace
-roleRef:
-  kind: ClusterRole
-  name: azuredisk-external-snapshotter-role
-  apiGroup: rbac.authorization.k8s.io
----
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -211,17 +211,27 @@ roleRef:
   kind: ClusterRole
   name: azuredisk-external-resizer-role
   apiGroup: rbac.authorization.k8s.io
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: csi-azuredisk-controller-secret-role
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: azuredisk-csi-driver-controller-secret-binding
+  name: csi-azuredisk-controller-secret-binding
 subjects:
   - kind: ServiceAccount
     name: csi-azuredisk-controller-sa
     namespace: testNamespace
 roleRef:
   kind: ClusterRole
-  name: azuredisk-csi-driver-controller-secret-role
+  name: csi-azuredisk-controller-secret-role
   apiGroup: rbac.authorization.k8s.io
+---

View file

@@ -1,3 +1,4 @@
+---
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -13,7 +14,7 @@ rules:
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: azuredisk-csi-driver-node-secret-binding
+  name: csi-azuredisk-node-secret-binding
 subjects:
   - kind: ServiceAccount
     name: csi-azuredisk-node-sa

View file

@@ -1,3 +1,4 @@
+##### Node Service Account, Roles, RoleBindings
 apiVersion: v1
 kind: ServiceAccount
 metadata:
@@ -6,6 +7,7 @@ metadata:
 ---
+##### Controller Service Account, Roles, Rolebindings
 apiVersion: v1
 kind: ServiceAccount
 metadata:
@@ -14,6 +16,7 @@ metadata:
 ---
+# xref: https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -43,6 +46,10 @@ rules:
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotcontents"]
     verbs: ["get", "list"]
+  # Access to volumeattachments is only needed when the CSI driver
+  # has the PUBLISH_UNPUBLISH_VOLUME controller capability.
+  # In that case, external-provisioner will watch volumeattachments
+  # to determine when it is safe to delete a volume.
   - apiGroups: ["storage.k8s.io"]
     resources: ["volumeattachments"]
     verbs: ["get", "list", "watch"]
@@ -61,9 +68,10 @@ roleRef:
   kind: ClusterRole
   name: csi-gce-pd-provisioner-role
   apiGroup: rbac.authorization.k8s.io
 
 ---
+# xref: https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -122,6 +130,7 @@ description: "This priority class should be used for the GCE PD CSI driver node
 ---
+# Resizer must be able to work with PVCs, PVs, SCs.
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -139,6 +148,7 @@ rules:
   - apiGroups: [""]
     resources: ["events"]
     verbs: ["list", "watch", "create", "update", "patch"]
+  # If handle-volume-inuse-error=true, the pod specific rbac is needed
   - apiGroups: [""]
     resources: ["pods"]
     verbs: ["get", "list", "watch"]
@@ -239,6 +249,7 @@ rules:
   - apiGroups: [""]
     resources: ["events"]
     verbs: ["list", "watch", "create", "update", "patch"]
+  # Secrets resource omitted since GCE PD snapshots does not require them
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotclasses"]
     verbs: ["get", "list", "watch"]

View file

@@ -27,8 +27,8 @@ spec:
         key: node.kubernetes.io/not-ready
         operator: Exists
       nodeSelector:
-        node-role.kubernetes.io/control-plane: ""
         kubernetes.io/os: linux
+        node-role.kubernetes.io/control-plane: ""
       serviceAccountName: csi-gce-pd-controller-sa
       priorityClassName: csi-gce-pd-controller
       containers:
@@ -43,6 +43,8 @@ spec:
         - "--leader-election-namespace=$(PDCSI_NAMESPACE)"
         - "--timeout=450s"
         - "--extra-create-metadata"
+        # - "--run-controller-service=false" # disable the controller service of the CSI driver
+        # - "--run-node-service=false" # disable the node service of the CSI driver
         - "--leader-election"
         - "--default-fstype=ext4"
         - "--controller-publish-readonly=true"
@@ -145,6 +147,8 @@ spec:
             - name: socket-dir
               mountPath: /csi
         - name: gce-pd-driver
+          # Don't change base image without changing pdImagePlaceholder in
+          # test/k8s-integration/main.go
           image: csi-driver:v0.0.0
           imagePullPolicy: IfNotPresent
           args:

View file

@@ -59,6 +59,8 @@ spec:
               mountPath: /csi
             - name: device-dir
               mountPath: /dev
+            # The following mounts are required to trigger host udevadm from
+            # container
             - name: udev-rules-etc
               mountPath: /etc/udev
             - name: udev-rules-lib
@@ -86,6 +88,8 @@ spec:
           hostPath:
             path: /dev
             type: Directory
+        # The following mounts are required to trigger host udevadm from
+        # container
         - name: udev-rules-etc
           hostPath:
             path: /etc/udev
@@ -106,5 +110,8 @@ spec:
          hostPath:
            path: /run/cryptsetup
            type: Directory
+      # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+      # See "special case". This will tolerate everything. Node component should
+      # be scheduled on all nodes.
       tolerations:
         - operator: Exists