loader_test now compares all documents in one file

Previously, only the first document of a multi-document
YAML file was compared, because testify's assert.YAMLEq
only parses the first document of such a string
(https://github.com/stretchr/testify/issues/1281).
The rendered output and the expected testdata are now
split on the document separator and compared document
by document.
Also update the testdata to match the adjusted expectations.
Otto Bittner 2022-11-25 14:56:30 +01:00
parent c71fd89e80
commit 18fe34c58b
12 changed files with 151 additions and 93 deletions
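The change to loader_test.go below works around this by splitting both the expected testdata and the rendered output on the YAML document separator, sorting the resulting documents, and comparing them pairwise. A minimal, self-contained sketch of the same idea, assuming gopkg.in/yaml.v3 and reflect.DeepEqual in place of testify's assert.YAMLEq; the helper name assertYAMLDocsEqual is illustrative and not part of this commit:

package main

import (
	"fmt"
	"reflect"
	"sort"
	"strings"

	"gopkg.in/yaml.v3"
)

// assertYAMLDocsEqual splits two multi-document YAML strings on the document
// separator, sorts the documents so their order does not matter, and compares
// them pairwise after parsing each one as YAML.
func assertYAMLDocsEqual(expected, actual string) error {
	expDocs := strings.Split(expected, "\n---\n")
	actDocs := strings.Split(actual, "\n---\n")
	if len(expDocs) != len(actDocs) {
		return fmt.Errorf("document count mismatch: expected %d, got %d", len(expDocs), len(actDocs))
	}
	sort.Strings(expDocs)
	sort.Strings(actDocs)
	for i := range expDocs {
		var exp, act any
		if err := yaml.Unmarshal([]byte(expDocs[i]), &exp); err != nil {
			return fmt.Errorf("parsing expected document %d: %w", i, err)
		}
		if err := yaml.Unmarshal([]byte(actDocs[i]), &act); err != nil {
			return fmt.Errorf("parsing actual document %d: %w", i, err)
		}
		if !reflect.DeepEqual(exp, act) {
			return fmt.Errorf("document %d differs", i)
		}
	}
	return nil
}

func main() {
	expected := "kind: ClusterRole\n---\nkind: ClusterRoleBinding\n"
	actual := "kind: ClusterRoleBinding\n---\nkind: ClusterRole\n"
	// Prints <nil>: same documents, different order.
	fmt.Println(assertYAMLDocsEqual(expected, actual))
}

Sorting both slices means the documents in a testdata file do not have to appear in the same order as in the rendered output, and because each document is parsed as YAML before comparison, formatting differences within a document do not cause mismatches.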

View File

@ -10,6 +10,6 @@ data:
ResourceGroup: {{ .Values.Azure.resourceGroup | b64enc }}
SubscriptionID: {{ .Values.Azure.subscriptionID | b64enc }}
TenantID: {{ .Values.Azure.tenantID | b64enc }}
# b64encode("vmss")
{{/* b64encode("vmss") */}}
VMType: dm1zcw==
{{- end -}}

View File

@ -4,10 +4,10 @@ metadata:
name: join-config
namespace: {{ .Release.Namespace }}
data:
# mustToJson is required so the json-strings passed from go are of type string in the rendered yaml.
{{/* mustToJson is required so the json-strings passed from go are of type string in the rendered yaml. */}}
measurements: {{ .Values.measurements | mustToJson }}
{{- if eq .Values.csp "Azure" }}
# ConfigMap.data is of type map[string]string. quote will not quote a quoted string.
{{/* ConfigMap.data is of type map[string]string. quote will not quote a quoted string. */}}
enforceIdKeyDigest: {{ .Values.enforceIdKeyDigest | quote }}
idkeydigest: {{ .Values.idkeydigest | quote }}
{{- end }}

View File

@ -13,6 +13,8 @@ import (
"io/fs"
"os"
"path"
"sort"
"strings"
"testing"
"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
@ -117,17 +119,29 @@ func TestConstellationServices(t *testing.T) {
result, err := engine.Render(chart, valuesToRender)
require.NoError(err)
for k, v := range result {
for k, actual := range result {
currentFile := path.Join("testdata", tc.config.GetProvider().String(), k)
content, err := os.ReadFile(currentFile)
expected, err := os.ReadFile(currentFile)
// If a file does not exist, we expect the render for that path to be empty.
if errors.Is(err, fs.ErrNotExist) {
assert.YAMLEq("", v, fmt.Sprintf("current file: %s", currentFile))
assert.YAMLEq("", actual, fmt.Sprintf("current file: %s", currentFile))
continue
}
assert.NoError(err)
assert.YAMLEq(string(content), v, fmt.Sprintf("current file: %s", currentFile))
require.NoError(err)
// testify has an issue where, when multiple documents are contained in one
// YAML string, only the first document is parsed [1]. For this reason we split
// the YAML string into a slice of strings, each entry containing one document.
// [1] https://github.com/stretchr/testify/issues/1281
expectedSplit := strings.Split(string(expected), "\n---\n")
sort.Strings(expectedSplit)
actualSplit := strings.Split(actual, "\n---\n")
sort.Strings(actualSplit)
assert.Equal(len(expectedSplit), len(actualSplit))
for i := range expectedSplit {
assert.YAMLEq(expectedSplit[i], actualSplit[i], fmt.Sprintf("current file: %s", currentFile))
}
}
})
}
@ -180,17 +194,29 @@ func TestOperators(t *testing.T) {
result, err := engine.Render(chart, valuesToRender)
require.NoError(err)
for k, v := range result {
for k, actual := range result {
currentFile := path.Join("testdata", tc.csp.String(), k)
content, err := os.ReadFile(currentFile)
expected, err := os.ReadFile(currentFile)
// If a file does not exist, we expect the render for that path to be empty.
if errors.Is(err, fs.ErrNotExist) {
assert.YAMLEq("", v, fmt.Sprintf("current file: %s", currentFile))
assert.YAMLEq("", actual, fmt.Sprintf("current file: %s", currentFile))
continue
}
assert.NoError(err)
assert.YAMLEq(string(content), v, fmt.Sprintf("current file: %s", currentFile))
require.NoError(err)
// testify has an issue where, when multiple documents are contained in one
// YAML string, only the first document is parsed [1]. For this reason we split
// the YAML string into a slice of strings, each entry containing one document.
// [1] https://github.com/stretchr/testify/issues/1281
expectedSplit := strings.Split(string(expected), "\n---\n")
sort.Strings(expectedSplit)
actualSplit := strings.Split(actual, "\n---\n")
sort.Strings(actualSplit)
assert.Equal(len(expectedSplit), len(actualSplit))
for i := range expectedSplit {
assert.YAMLEq(expectedSplit[i], actualSplit[i], fmt.Sprintf("current file: %s", currentFile))
}
}
})
}

View File

@ -9,5 +9,4 @@ data:
ResourceGroup: cmVzb3VyY2VHcm91cA==
SubscriptionID: c3Vic2NyaXB0aW9uSUQ=
TenantID: VGVuYW50SUQ=
# b64encode("vmss")
VMType: dm1zcw==

View File

@ -44,7 +44,7 @@ spec:
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/not-ready
operator: Exists
operator: Exists
containers:
- name: csi-provisioner
image: "mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.2.0"
@ -188,7 +188,7 @@ spec:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: AZURE_GO_SDK_LOG_LEVEL
value:
value:
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /csi
@ -213,4 +213,4 @@ spec:
path: /etc/ssl/certs
- name: ssl-pki
hostPath:
path: /etc/pki/ca-trust/extracted
path: /etc/pki/ca-trust/extracted

View File

@ -41,7 +41,7 @@ spec:
- virtual-kubelet
priorityClassName: system-node-critical
tolerations:
- operator: Exists
- operator: Exists
containers:
- name: liveness-probe
volumeMounts:
@ -134,7 +134,7 @@ spec:
apiVersion: v1
fieldPath: spec.nodeName
- name: AZURE_GO_SDK_LOG_LEVEL
value:
value:
imagePullPolicy: IfNotPresent
securityContext:
privileged: true

View File

@ -36,7 +36,30 @@ rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-provisioner-binding
labels:
app.kubernetes.io/instance: "testRelease"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.0.1"
helm.sh/chart: "azuredisk-csi-driver-v1.0.1"
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: testNamespace
roleRef:
kind: ClusterRole
name: azuredisk-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@ -67,6 +90,28 @@ rules:
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-attacher-binding
labels:
app.kubernetes.io/instance: "testRelease"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.0.1"
helm.sh/chart: "azuredisk-csi-driver-v1.0.1"
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: testNamespace
roleRef:
kind: ClusterRole
name: azuredisk-external-attacher-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@ -97,6 +142,27 @@ rules:
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-snapshotter-binding
labels:
app.kubernetes.io/instance: "testRelease"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.0.1"
helm.sh/chart: "azuredisk-csi-driver-v1.0.1"
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: testNamespace
roleRef:
kind: ClusterRole
name: azuredisk-external-snapshotter-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@ -127,72 +193,6 @@ rules:
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-driver-controller-secret-role
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-provisioner-binding
labels:
app.kubernetes.io/instance: "testRelease"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.0.1"
helm.sh/chart: "azuredisk-csi-driver-v1.0.1"
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: testNamespace
roleRef:
kind: ClusterRole
name: azuredisk-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-attacher-binding
labels:
app.kubernetes.io/instance: "testRelease"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.0.1"
helm.sh/chart: "azuredisk-csi-driver-v1.0.1"
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: testNamespace
roleRef:
kind: ClusterRole
name: azuredisk-external-attacher-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-snapshotter-binding
labels:
app.kubernetes.io/instance: "testRelease"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.0.1"
helm.sh/chart: "azuredisk-csi-driver-v1.0.1"
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: testNamespace
roleRef:
kind: ClusterRole
name: azuredisk-external-snapshotter-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@ -211,17 +211,27 @@ roleRef:
kind: ClusterRole
name: azuredisk-external-resizer-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-azuredisk-controller-secret-role
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-driver-controller-secret-binding
name: csi-azuredisk-controller-secret-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: testNamespace
roleRef:
kind: ClusterRole
name: azuredisk-csi-driver-controller-secret-role
name: csi-azuredisk-controller-secret-role
apiGroup: rbac.authorization.k8s.io
---

View File

@ -1,3 +1,4 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@ -13,7 +14,7 @@ rules:
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-driver-node-secret-binding
name: csi-azuredisk-node-secret-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-node-sa

View File

@ -1,3 +1,4 @@
##### Node Service Account, Roles, RoleBindings
apiVersion: v1
kind: ServiceAccount
metadata:
@ -6,6 +7,7 @@ metadata:
---
##### Controller Service Account, Roles, Rolebindings
apiVersion: v1
kind: ServiceAccount
metadata:
@ -14,6 +16,7 @@ metadata:
---
# xref: https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@ -43,6 +46,10 @@ rules:
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
# Access to volumeattachments is only needed when the CSI driver
# has the PUBLISH_UNPUBLISH_VOLUME controller capability.
# In that case, external-provisioner will watch volumeattachments
# to determine when it is safe to delete a volume.
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch"]
@ -61,9 +68,10 @@ roleRef:
kind: ClusterRole
name: csi-gce-pd-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
# xref: https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@ -122,6 +130,7 @@ description: "This priority class should be used for the GCE PD CSI driver node
---
# Resizer must be able to work with PVCs, PVs, SCs.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@ -139,6 +148,7 @@ rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
# If handle-volume-inuse-error=true, the pod specific rbac is needed
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
@ -239,6 +249,7 @@ rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
# Secrets resource omitted since GCE PD snapshots does not require them
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]

View File

@ -27,8 +27,8 @@ spec:
key: node.kubernetes.io/not-ready
operator: Exists
nodeSelector:
node-role.kubernetes.io/control-plane: ""
kubernetes.io/os: linux
node-role.kubernetes.io/control-plane: ""
serviceAccountName: csi-gce-pd-controller-sa
priorityClassName: csi-gce-pd-controller
containers:
@ -43,6 +43,8 @@ spec:
- "--leader-election-namespace=$(PDCSI_NAMESPACE)"
- "--timeout=450s"
- "--extra-create-metadata"
# - "--run-controller-service=false" # disable the controller service of the CSI driver
# - "--run-node-service=false" # disable the node service of the CSI driver
- "--leader-election"
- "--default-fstype=ext4"
- "--controller-publish-readonly=true"
@ -145,6 +147,8 @@ spec:
- name: socket-dir
mountPath: /csi
- name: gce-pd-driver
# Don't change base image without changing pdImagePlaceholder in
# test/k8s-integration/main.go
image: csi-driver:v0.0.0
imagePullPolicy: IfNotPresent
args:

View File

@ -59,6 +59,8 @@ spec:
mountPath: /csi
- name: device-dir
mountPath: /dev
# The following mounts are required to trigger host udevadm from
# container
- name: udev-rules-etc
mountPath: /etc/udev
- name: udev-rules-lib
@ -86,6 +88,8 @@ spec:
hostPath:
path: /dev
type: Directory
# The following mounts are required to trigger host udevadm from
# container
- name: udev-rules-etc
hostPath:
path: /etc/udev
@ -106,5 +110,8 @@ spec:
hostPath:
path: /run/cryptsetup
type: Directory
# https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
# See "special case". This will tolerate everything. Node component should
# be scheduled on all nodes.
tolerations:
- operator: Exists