From 18fe34c58b33bc411139db0c4a92ce3375dc2f34 Mon Sep 17 00:00:00 2001 From: Otto Bittner Date: Fri, 25 Nov 2022 14:56:30 +0100 Subject: [PATCH] loader_test now compares all documents in one file Previously only the first document was compared due to an issue in testify. Also update testdata to match the adjusted expectations. --- .../autoscaler/templates/azure-secret.yaml | 2 +- .../join-service/templates/configmap.yaml | 4 +- cli/internal/helm/loader_test.go | 50 ++++-- .../autoscaler/templates/azure-secret.yaml | 1 - .../templates/csi-azuredisk-controller.yaml | 6 +- .../templates/csi-azuredisk-node.yaml | 4 +- .../rbac-csi-azuredisk-controller.yaml | 148 ++++++++++-------- .../templates/rbac-csi-azuredisk-node.yaml | 3 +- .../rbac-csi-snapshot-controller.yaml | 0 .../templates/cluster_setup.yaml | 13 +- .../templates/controller.yaml | 6 +- .../templates/node.yaml | 7 + 12 files changed, 151 insertions(+), 93 deletions(-) delete mode 100644 cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml diff --git a/cli/internal/helm/charts/edgeless/constellation-services/charts/autoscaler/templates/azure-secret.yaml b/cli/internal/helm/charts/edgeless/constellation-services/charts/autoscaler/templates/azure-secret.yaml index 62fa109af..3d1432fc7 100644 --- a/cli/internal/helm/charts/edgeless/constellation-services/charts/autoscaler/templates/azure-secret.yaml +++ b/cli/internal/helm/charts/edgeless/constellation-services/charts/autoscaler/templates/azure-secret.yaml @@ -10,6 +10,6 @@ data: ResourceGroup: {{ .Values.Azure.resourceGroup | b64enc }} SubscriptionID: {{ .Values.Azure.subscriptionID | b64enc }} TenantID: {{ .Values.Azure.tenantID | b64enc }} - # b64encode("vmss") + {{/* b64encode("vmss") */}} VMType: dm1zcw== {{- end -}} diff --git a/cli/internal/helm/charts/edgeless/constellation-services/charts/join-service/templates/configmap.yaml b/cli/internal/helm/charts/edgeless/constellation-services/charts/join-service/templates/configmap.yaml index 41867f260..1973ae9b8 100644 --- a/cli/internal/helm/charts/edgeless/constellation-services/charts/join-service/templates/configmap.yaml +++ b/cli/internal/helm/charts/edgeless/constellation-services/charts/join-service/templates/configmap.yaml @@ -4,10 +4,10 @@ metadata: name: join-config namespace: {{ .Release.Namespace }} data: - # mustToJson is required so the json-strings passed from go are of type string in the rendered yaml. + {{/* mustToJson is required so the json-strings passed from go are of type string in the rendered yaml. */}} measurements: {{ .Values.measurements | mustToJson }} {{- if eq .Values.csp "Azure" }} - # ConfigMap.data is of type map[string]string. quote will not quote a quoted string. + {{/* ConfigMap.data is of type map[string]string. quote will not quote a quoted string. 
*/}}
   enforceIdKeyDigest: {{ .Values.enforceIdKeyDigest | quote }}
   idkeydigest: {{ .Values.idkeydigest | quote }}
   {{- end }}
diff --git a/cli/internal/helm/loader_test.go b/cli/internal/helm/loader_test.go
index 909e00c0b..7acfc263b 100644
--- a/cli/internal/helm/loader_test.go
+++ b/cli/internal/helm/loader_test.go
@@ -13,6 +13,8 @@ import (
 	"io/fs"
 	"os"
 	"path"
+	"sort"
+	"strings"
 	"testing"
 
 	"github.com/edgelesssys/constellation/v2/internal/attestation/measurements"
@@ -117,17 +119,29 @@ func TestConstellationServices(t *testing.T) {
 			result, err := engine.Render(chart, valuesToRender)
 			require.NoError(err)
 
-			for k, v := range result {
+			for k, actual := range result {
 				currentFile := path.Join("testdata", tc.config.GetProvider().String(), k)
-				content, err := os.ReadFile(currentFile)
-
+				expected, err := os.ReadFile(currentFile)
 				// If a file does not exist, we expect the render for that path to be empty.
 				if errors.Is(err, fs.ErrNotExist) {
-					assert.YAMLEq("", v, fmt.Sprintf("current file: %s", currentFile))
+					assert.YAMLEq("", actual, fmt.Sprintf("current file: %s", currentFile))
 					continue
 				}
-				assert.NoError(err)
-				assert.YAMLEq(string(content), v, fmt.Sprintf("current file: %s", currentFile))
+				require.NoError(err)
+
+				// testify has an issue where, when multiple documents are contained in one YAML string,
+				// only the first document is parsed [1]. For this reason, we split the YAML string
+				// into a slice of strings, each entry containing one document.
+				// [1] https://github.com/stretchr/testify/issues/1281
+				expectedSplit := strings.Split(string(expected), "\n---\n")
+				sort.Strings(expectedSplit)
+				actualSplit := strings.Split(actual, "\n---\n")
+				sort.Strings(actualSplit)
+				assert.Equal(len(expectedSplit), len(actualSplit))
+
+				for i := range expectedSplit {
+					assert.YAMLEq(expectedSplit[i], actualSplit[i], fmt.Sprintf("current file: %s", currentFile))
+				}
 			}
 		})
 	}
@@ -180,17 +194,29 @@ func TestOperators(t *testing.T) {
 			result, err := engine.Render(chart, valuesToRender)
 			require.NoError(err)
 
-			for k, v := range result {
+			for k, actual := range result {
 				currentFile := path.Join("testdata", tc.csp.String(), k)
-				content, err := os.ReadFile(currentFile)
-
+				expected, err := os.ReadFile(currentFile)
 				// If a file does not exist, we expect the render for that path to be empty.
 				if errors.Is(err, fs.ErrNotExist) {
-					assert.YAMLEq("", v, fmt.Sprintf("current file: %s", currentFile))
+					assert.YAMLEq("", actual, fmt.Sprintf("current file: %s", currentFile))
 					continue
 				}
-				assert.NoError(err)
-				assert.YAMLEq(string(content), v, fmt.Sprintf("current file: %s", currentFile))
+				require.NoError(err)
+
+				// testify has an issue where, when multiple documents are contained in one YAML string,
+				// only the first document is parsed [1]. For this reason, we split the YAML string
+				// into a slice of strings, each entry containing one document. 
+ // [1] https://github.com/stretchr/testify/issues/1281 + expectedSplit := strings.Split(string(expected), "\n---\n") + sort.Strings(expectedSplit) + actualSplit := strings.Split(actual, "\n---\n") + sort.Strings(actualSplit) + assert.Equal(len(expectedSplit), len(actualSplit)) + + for i := range expectedSplit { + assert.YAMLEq(expectedSplit[i], actualSplit[i], fmt.Sprintf("current file: %s", currentFile)) + } } }) } diff --git a/cli/internal/helm/testdata/Azure/constellation-services/charts/autoscaler/templates/azure-secret.yaml b/cli/internal/helm/testdata/Azure/constellation-services/charts/autoscaler/templates/azure-secret.yaml index d757253d2..58092bc5e 100644 --- a/cli/internal/helm/testdata/Azure/constellation-services/charts/autoscaler/templates/azure-secret.yaml +++ b/cli/internal/helm/testdata/Azure/constellation-services/charts/autoscaler/templates/azure-secret.yaml @@ -9,5 +9,4 @@ data: ResourceGroup: cmVzb3VyY2VHcm91cA== SubscriptionID: c3Vic2NyaXB0aW9uSUQ= TenantID: VGVuYW50SUQ= - # b64encode("vmss") VMType: dm1zcw== diff --git a/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml b/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml index fe103ad0b..9d8e6ab63 100644 --- a/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml +++ b/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml @@ -44,7 +44,7 @@ spec: operator: Exists - effect: NoSchedule key: node.kubernetes.io/not-ready - operator: Exists + operator: Exists containers: - name: csi-provisioner image: "mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.2.0" @@ -188,7 +188,7 @@ spec: - name: CSI_ENDPOINT value: unix:///csi/csi.sock - name: AZURE_GO_SDK_LOG_LEVEL - value: + value: imagePullPolicy: IfNotPresent volumeMounts: - mountPath: /csi @@ -213,4 +213,4 @@ spec: path: /etc/ssl/certs - name: ssl-pki hostPath: - path: /etc/pki/ca-trust/extracted + path: /etc/pki/ca-trust/extracted diff --git a/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml b/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml index b78cece7f..9e8f5041c 100644 --- a/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml +++ b/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml @@ -41,7 +41,7 @@ spec: - virtual-kubelet priorityClassName: system-node-critical tolerations: - - operator: Exists + - operator: Exists containers: - name: liveness-probe volumeMounts: @@ -134,7 +134,7 @@ spec: apiVersion: v1 fieldPath: spec.nodeName - name: AZURE_GO_SDK_LOG_LEVEL - value: + value: imagePullPolicy: IfNotPresent securityContext: privileged: true diff --git a/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml b/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml index 6ff2d2cc9..2c3a4c7dd 100644 --- a/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml +++ 
b/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml @@ -36,7 +36,30 @@ rules: - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] + --- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-provisioner-binding + labels: + app.kubernetes.io/instance: "testRelease" + app.kubernetes.io/managed-by: "Helm" + app.kubernetes.io/name: "azuredisk-csi-driver" + app.kubernetes.io/version: "v1.0.1" + helm.sh/chart: "azuredisk-csi-driver-v1.0.1" +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: testNamespace +roleRef: + kind: ClusterRole + name: azuredisk-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -67,6 +90,28 @@ rules: resources: ["leases"] verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] --- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-attacher-binding + labels: + app.kubernetes.io/instance: "testRelease" + app.kubernetes.io/managed-by: "Helm" + app.kubernetes.io/name: "azuredisk-csi-driver" + app.kubernetes.io/version: "v1.0.1" + helm.sh/chart: "azuredisk-csi-driver-v1.0.1" +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: testNamespace +roleRef: + kind: ClusterRole + name: azuredisk-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -97,6 +142,27 @@ rules: resources: ["leases"] verbs: ["get", "watch", "list", "delete", "update", "create", "patch"] --- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: azuredisk-csi-snapshotter-binding + labels: + app.kubernetes.io/instance: "testRelease" + app.kubernetes.io/managed-by: "Helm" + app.kubernetes.io/name: "azuredisk-csi-driver" + app.kubernetes.io/version: "v1.0.1" + helm.sh/chart: "azuredisk-csi-driver-v1.0.1" +subjects: + - kind: ServiceAccount + name: csi-azuredisk-controller-sa + namespace: testNamespace +roleRef: + kind: ClusterRole + name: azuredisk-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +--- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -127,72 +193,6 @@ rules: resources: ["pods"] verbs: ["get", "list", "watch"] --- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: azuredisk-csi-driver-controller-secret-role -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: azuredisk-csi-provisioner-binding - labels: - app.kubernetes.io/instance: "testRelease" - app.kubernetes.io/managed-by: "Helm" - app.kubernetes.io/name: "azuredisk-csi-driver" - app.kubernetes.io/version: "v1.0.1" - helm.sh/chart: "azuredisk-csi-driver-v1.0.1" -subjects: - - kind: ServiceAccount - name: csi-azuredisk-controller-sa - namespace: testNamespace -roleRef: - kind: ClusterRole - name: azuredisk-external-provisioner-role - apiGroup: rbac.authorization.k8s.io ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: azuredisk-csi-attacher-binding - labels: - app.kubernetes.io/instance: "testRelease" - app.kubernetes.io/managed-by: "Helm" - app.kubernetes.io/name: 
"azuredisk-csi-driver" - app.kubernetes.io/version: "v1.0.1" - helm.sh/chart: "azuredisk-csi-driver-v1.0.1" -subjects: - - kind: ServiceAccount - name: csi-azuredisk-controller-sa - namespace: testNamespace -roleRef: - kind: ClusterRole - name: azuredisk-external-attacher-role - apiGroup: rbac.authorization.k8s.io ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: azuredisk-csi-snapshotter-binding - labels: - app.kubernetes.io/instance: "testRelease" - app.kubernetes.io/managed-by: "Helm" - app.kubernetes.io/name: "azuredisk-csi-driver" - app.kubernetes.io/version: "v1.0.1" - helm.sh/chart: "azuredisk-csi-driver-v1.0.1" -subjects: - - kind: ServiceAccount - name: csi-azuredisk-controller-sa - namespace: testNamespace -roleRef: - kind: ClusterRole - name: azuredisk-external-snapshotter-role - apiGroup: rbac.authorization.k8s.io ---- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -211,17 +211,27 @@ roleRef: kind: ClusterRole name: azuredisk-external-resizer-role apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-azuredisk-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: azuredisk-csi-driver-controller-secret-binding + name: csi-azuredisk-controller-secret-binding subjects: - kind: ServiceAccount name: csi-azuredisk-controller-sa namespace: testNamespace roleRef: kind: ClusterRole - name: azuredisk-csi-driver-controller-secret-role + name: csi-azuredisk-controller-secret-role apiGroup: rbac.authorization.k8s.io ---- diff --git a/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml b/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml index 2b8e90638..036f06807 100644 --- a/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml +++ b/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml @@ -1,3 +1,4 @@ +--- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -13,7 +14,7 @@ rules: kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: azuredisk-csi-driver-node-secret-binding + name: csi-azuredisk-node-secret-binding subjects: - kind: ServiceAccount name: csi-azuredisk-node-sa diff --git a/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml b/cli/internal/helm/testdata/Azure/constellation-services/charts/azuredisk-csi-driver/templates/rbac-csi-snapshot-controller.yaml deleted file mode 100644 index e69de29bb..000000000 diff --git a/cli/internal/helm/testdata/GCP/constellation-services/charts/gcp-compute-persistent-disk-csi-driver/templates/cluster_setup.yaml b/cli/internal/helm/testdata/GCP/constellation-services/charts/gcp-compute-persistent-disk-csi-driver/templates/cluster_setup.yaml index 5202fd80a..b5b472dd3 100644 --- a/cli/internal/helm/testdata/GCP/constellation-services/charts/gcp-compute-persistent-disk-csi-driver/templates/cluster_setup.yaml +++ b/cli/internal/helm/testdata/GCP/constellation-services/charts/gcp-compute-persistent-disk-csi-driver/templates/cluster_setup.yaml @@ -1,3 +1,4 @@ +##### Node 
Service Account, Roles, RoleBindings apiVersion: v1 kind: ServiceAccount metadata: @@ -6,6 +7,7 @@ metadata: --- +##### Controller Service Account, Roles, Rolebindings apiVersion: v1 kind: ServiceAccount metadata: @@ -14,6 +16,7 @@ metadata: --- +# xref: https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -43,6 +46,10 @@ rules: - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents"] verbs: ["get", "list"] + # Access to volumeattachments is only needed when the CSI driver + # has the PUBLISH_UNPUBLISH_VOLUME controller capability. + # In that case, external-provisioner will watch volumeattachments + # to determine when it is safe to delete a volume. - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] verbs: ["get", "list", "watch"] @@ -61,9 +68,10 @@ roleRef: kind: ClusterRole name: csi-gce-pd-provisioner-role apiGroup: rbac.authorization.k8s.io - + --- +# xref: https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -122,6 +130,7 @@ description: "This priority class should be used for the GCE PD CSI driver node --- +# Resizer must be able to work with PVCs, PVs, SCs. kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -139,6 +148,7 @@ rules: - apiGroups: [""] resources: ["events"] verbs: ["list", "watch", "create", "update", "patch"] + # If handle-volume-inuse-error=true, the pod specific rbac is needed - apiGroups: [""] resources: ["pods"] verbs: ["get", "list", "watch"] @@ -239,6 +249,7 @@ rules: - apiGroups: [""] resources: ["events"] verbs: ["list", "watch", "create", "update", "patch"] + # Secrets resource omitted since GCE PD snapshots does not require them - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotclasses"] verbs: ["get", "list", "watch"] diff --git a/cli/internal/helm/testdata/GCP/constellation-services/charts/gcp-compute-persistent-disk-csi-driver/templates/controller.yaml b/cli/internal/helm/testdata/GCP/constellation-services/charts/gcp-compute-persistent-disk-csi-driver/templates/controller.yaml index d0d855bd6..7006b9ab1 100644 --- a/cli/internal/helm/testdata/GCP/constellation-services/charts/gcp-compute-persistent-disk-csi-driver/templates/controller.yaml +++ b/cli/internal/helm/testdata/GCP/constellation-services/charts/gcp-compute-persistent-disk-csi-driver/templates/controller.yaml @@ -27,8 +27,8 @@ spec: key: node.kubernetes.io/not-ready operator: Exists nodeSelector: - node-role.kubernetes.io/control-plane: "" kubernetes.io/os: linux + node-role.kubernetes.io/control-plane: "" serviceAccountName: csi-gce-pd-controller-sa priorityClassName: csi-gce-pd-controller containers: @@ -43,6 +43,8 @@ spec: - "--leader-election-namespace=$(PDCSI_NAMESPACE)" - "--timeout=450s" - "--extra-create-metadata" + # - "--run-controller-service=false" # disable the controller service of the CSI driver + # - "--run-node-service=false" # disable the node service of the CSI driver - "--leader-election" - "--default-fstype=ext4" - "--controller-publish-readonly=true" @@ -145,6 +147,8 @@ spec: - name: socket-dir mountPath: /csi - name: gce-pd-driver + # Don't change base image without changing pdImagePlaceholder in + # test/k8s-integration/main.go image: csi-driver:v0.0.0 imagePullPolicy: IfNotPresent args: diff --git 
a/cli/internal/helm/testdata/GCP/constellation-services/charts/gcp-compute-persistent-disk-csi-driver/templates/node.yaml b/cli/internal/helm/testdata/GCP/constellation-services/charts/gcp-compute-persistent-disk-csi-driver/templates/node.yaml index 201bc885c..7440a026c 100644 --- a/cli/internal/helm/testdata/GCP/constellation-services/charts/gcp-compute-persistent-disk-csi-driver/templates/node.yaml +++ b/cli/internal/helm/testdata/GCP/constellation-services/charts/gcp-compute-persistent-disk-csi-driver/templates/node.yaml @@ -59,6 +59,8 @@ spec: mountPath: /csi - name: device-dir mountPath: /dev + # The following mounts are required to trigger host udevadm from + # container - name: udev-rules-etc mountPath: /etc/udev - name: udev-rules-lib @@ -86,6 +88,8 @@ spec: hostPath: path: /dev type: Directory + # The following mounts are required to trigger host udevadm from + # container - name: udev-rules-etc hostPath: path: /etc/udev @@ -106,5 +110,8 @@ spec: hostPath: path: /run/cryptsetup type: Directory + # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + # See "special case". This will tolerate everything. Node component should + # be scheduled on all nodes. tolerations: - operator: Exists
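
Note on the per-document comparison added in loader_test.go above: testify's assert.YAMLEq parses its arguments with yaml.Unmarshal, which by documented behavior decodes only the first document of a multi-document string, so two multi-doc manifests compare as equal whenever their first documents match (see the linked issue, stretchr/testify#1281). The sketch below mirrors the loop added by this patch as a self-contained program; the helper name compareMultiDoc is illustrative and not part of the patch.

package main

import (
	"fmt"
	"reflect"
	"sort"
	"strings"

	"gopkg.in/yaml.v3"
)

// compareMultiDoc splits both strings into YAML documents, sorts the
// documents so that their order does not matter, and compares them
// pairwise instead of handing the multi-doc strings to a single
// YAML-equality assertion.
func compareMultiDoc(expected, actual string) error {
	exp := strings.Split(expected, "\n---\n")
	act := strings.Split(actual, "\n---\n")
	sort.Strings(exp)
	sort.Strings(act)
	if len(exp) != len(act) {
		return fmt.Errorf("expected %d documents, got %d", len(exp), len(act))
	}
	for i := range exp {
		var e, a any
		if err := yaml.Unmarshal([]byte(exp[i]), &e); err != nil {
			return err
		}
		if err := yaml.Unmarshal([]byte(act[i]), &a); err != nil {
			return err
		}
		if !reflect.DeepEqual(e, a) {
			return fmt.Errorf("document %d differs", i)
		}
	}
	return nil
}

func main() {
	// The first documents match, the second ones differ; a plain
	// YAMLEq on the full strings would not catch the difference.
	fmt.Println(compareMultiDoc("a: 1\n---\nb: 2\n", "a: 1\n---\nb: 3\n"))
}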
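
Note on the template comments moving from # to {{/* */}}: Helm renders charts with Go's template engine, where a {{/* ... */}} action produces no output, while a plain # line is ordinary YAML that survives rendering. That is why the expected testdata no longer contains the b64encode("vmss") comment. A minimal sketch using only the standard library follows; the template text is made up for illustration.

package main

import (
	"os"
	"text/template"
)

func main() {
	const tpl = `data:
  {{/* stripped by the template engine */}}
  # kept verbatim in the rendered output
  VMType: dm1zcw==
`
	t := template.Must(template.New("azure-secret").Parse(tpl))
	// The rendered output keeps the '#' line but renders the comment
	// action to nothing, leaving only that line's whitespace behind.
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}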