From 7d4ab0716312e932a5ec8893a372e1adb6e002d3 Mon Sep 17 00:00:00 2001
From: Malte Poll
Date: Fri, 17 Mar 2023 09:53:22 +0100
Subject: [PATCH] helm: add tests for AWS and OpenStack

---
 cli/internal/helm/loader_test.go | 135 +++++++++--
 .../templates/deployment.yaml | 150 ++++++++++++
 .../templates/leader-election-rbac.yaml | 61 +++++
 .../templates/manager-config.yaml | 23 ++
 .../templates/manager-rbac.yaml | 222 ++++++++++++++++++
 .../templates/metrics-reader-rbac.yaml | 15 ++
 .../templates/metrics-service.yaml | 22 ++
 .../templates/proxy-rbac.yaml | 42 ++++
 .../templates/deployment.yaml | 112 +++++++++
 .../templates/leader-election-rbac.yaml | 63 +++++
 .../templates/manager-rbac.yaml | 131 +++++++++++
 .../templates/metrics-reader-rbac.yaml | 16 ++
 .../templates/metrics-service.yaml | 24 ++
 .../templates/proxy-rbac.yaml | 44 ++++
 .../templates/selfsigned-issuer.yaml | 12 +
 .../templates/serving-cert.yaml | 18 ++
 .../validating-webhook-configuration.yaml | 34 +++
 .../templates/webhook-service.yaml | 22 ++
 .../autoscaler/templates/aws-deployment.yaml | 48 ++++
 .../autoscaler/templates/clusterrole.yaml | 138 +++++++++++
 .../templates/clusterrolebinding.yaml | 17 ++
 .../templates/poddisruptionbudget.yaml | 15 ++
 .../charts/autoscaler/templates/role.yaml | 26 ++
 .../autoscaler/templates/rolebinding.yaml | 17 ++
 .../charts/autoscaler/templates/service.yaml | 19 ++
 .../autoscaler/templates/serviceaccount.yaml | 10 +
 .../charts/ccm/templates/aws-daemonset.yaml | 61 +++++
 .../ccm/templates/clusterrolebinding.yaml | 12 +
 .../charts/ccm/templates/serviceaccount.yaml | 5 +
 .../join-service/templates/clusterrole.yaml | 45 ++++
 .../templates/clusterrolebinding.yaml | 12 +
 .../join-service/templates/configmap.yaml | 9 +
 .../join-service/templates/daemonset.yaml | 67 ++++++
 .../join-service/templates/service.yaml | 17 ++
 .../templates/serviceaccount.yaml | 5 +
 .../key-service/templates/clusterrole.yaml | 13 +
 .../templates/clusterrolebinding.yaml | 12 +
 .../key-service/templates/daemonset.yaml | 62 +++++
 .../key-service/templates/mastersecret.yaml | 9 +
 .../charts/key-service/templates/service.yaml | 16 ++
 .../key-service/templates/serviceaccount.yaml | 5 +
 .../templates/clusterrolebinding.yaml | 15 ++
 .../konnectivity/templates/daemonset.yaml | 76 ++++++
 .../templates/serviceaccount.yaml | 8 +
 .../templates/daemonset.yaml | 51 ++++
 .../templates/loadbalancer-service.yaml | 18 ++
 .../templates/nodeport-service.yaml | 20 ++
 .../constellation-services/templates/.gitkeep | 0
 .../templates/deployment.yaml | 150 ++++++++++++
 .../templates/leader-election-rbac.yaml | 61 +++++
 .../templates/manager-config.yaml | 23 ++
 .../templates/manager-rbac.yaml | 222 ++++++++++++++++++
 .../templates/metrics-reader-rbac.yaml | 15 ++
 .../templates/metrics-service.yaml | 22 ++
 .../templates/proxy-rbac.yaml | 42 ++++
 .../templates/deployment.yaml | 112 +++++++++
 .../templates/leader-election-rbac.yaml | 63 +++++
 .../templates/manager-rbac.yaml | 131 +++++++++++
 .../templates/metrics-reader-rbac.yaml | 16 ++
 .../templates/metrics-service.yaml | 24 ++
 .../templates/proxy-rbac.yaml | 44 ++++
 .../templates/selfsigned-issuer.yaml | 12 +
 .../templates/serving-cert.yaml | 18 ++
 .../validating-webhook-configuration.yaml | 34 +++
 .../templates/webhook-service.yaml | 22 ++
 .../ccm/templates/clusterrolebinding.yaml | 12 +
 .../ccm/templates/openstack-daemonset.yaml | 68 ++++++
 .../ccm/templates/openstack-secret.yaml | 7 +
 .../charts/ccm/templates/serviceaccount.yaml | 5 +
 .../join-service/templates/clusterrole.yaml | 45 ++++
 .../templates/clusterrolebinding.yaml | 12 +
 .../join-service/templates/configmap.yaml | 9 +
 .../join-service/templates/daemonset.yaml | 67 ++++++
 .../join-service/templates/service.yaml | 17 ++
 .../templates/serviceaccount.yaml | 5 +
 .../key-service/templates/clusterrole.yaml | 13 +
 .../templates/clusterrolebinding.yaml | 12 +
 .../key-service/templates/daemonset.yaml | 62 +++++
 .../key-service/templates/mastersecret.yaml | 9 +
 .../charts/key-service/templates/service.yaml | 16 ++
 .../key-service/templates/serviceaccount.yaml | 5 +
 .../templates/clusterrolebinding.yaml | 15 ++
 .../konnectivity/templates/daemonset.yaml | 76 ++++++
 .../templates/serviceaccount.yaml | 8 +
 .../templates/daemonset.yaml | 51 ++++
 .../templates/loadbalancer-service.yaml | 18 ++
 .../templates/nodeport-service.yaml | 20 ++
 .../constellation-services/templates/.gitkeep | 0
 88 files changed, 3511 insertions(+), 26 deletions(-)

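Nearly all of the 88 changed files are golden manifests under cli/internal/helm/testdata/AWS/ and cli/internal/helm/testdata/OpenStack/: the table-driven test in loader_test.go renders the charts with CSP-specific values (set up by prepareAWSValues, prepareOpenStackValues, and the existing modifiers) and checks the rendered templates against these files. The snippet below is a minimal, self-contained sketch of that golden-file pattern on top of Helm's Go SDK; the helper name renderAndCompare and the exact render options are illustrative assumptions, not the helpers used by loader_test.go (which brings its own, e.g. buildTestdataMap). The release name and namespace mirror the testRelease/testNamespace strings visible in the golden files.

// Sketch only, not part of this patch; assumes Helm v3's Go SDK and testify.
package helm_test

import (
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"helm.sh/helm/v3/pkg/chart/loader"
	"helm.sh/helm/v3/pkg/chartutil"
	"helm.sh/helm/v3/pkg/engine"
)

// renderAndCompare renders the chart at chartDir with the given values and
// compares every rendered manifest against testdata/<csp>/<rendered path>.
func renderAndCompare(t *testing.T, csp, chartDir string, values map[string]any) {
	t.Helper()
	require := require.New(t)
	assert := assert.New(t)

	chrt, err := loader.Load(chartDir)
	require.NoError(err)

	opts := chartutil.ReleaseOptions{
		Name:      "testRelease",
		Namespace: "testNamespace",
		IsInstall: true,
	}
	renderVals, err := chartutil.ToRenderValues(chrt, values, opts, chartutil.DefaultCapabilities)
	require.NoError(err)

	// engine.Render returns a map keyed by the template path, e.g.
	// "constellation-services/charts/join-service/templates/daemonset.yaml",
	// which matches the layout of the testdata trees added by this patch.
	rendered, err := engine.Render(chrt, renderVals)
	require.NoError(err)

	for name, content := range rendered {
		if filepath.Ext(name) != ".yaml" || strings.TrimSpace(content) == "" {
			continue // skip helpers, NOTES.txt, and templates that rendered empty
		}
		want, err := os.ReadFile(filepath.Join("testdata", csp, name))
		require.NoError(err, "missing golden file for %s", name)
		assert.Equal(string(want), content, "%s does not match its golden file", name)
	}
}

With a helper of this shape, each new test case only has to pick the CSP's values modifier and the matching testdata subtree, which is essentially what the AWS and OpenStack cases added here do.
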
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/deployment.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/leader-election-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/manager-config.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/metrics-reader-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/metrics-service.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/proxy-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/deployment.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/leader-election-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/metrics-reader-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/metrics-service.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/proxy-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/selfsigned-issuer.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/serving-cert.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/validating-webhook-configuration.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/webhook-service.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/aws-deployment.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/clusterrole.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/clusterrolebinding.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/poddisruptionbudget.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/role.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/rolebinding.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/service.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/serviceaccount.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/ccm/templates/aws-daemonset.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/ccm/templates/clusterrolebinding.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/ccm/templates/serviceaccount.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/clusterrole.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/clusterrolebinding.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/configmap.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/daemonset.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/service.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/serviceaccount.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/clusterrole.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/clusterrolebinding.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/daemonset.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/mastersecret.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/service.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/serviceaccount.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/konnectivity/templates/clusterrolebinding.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/konnectivity/templates/daemonset.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/konnectivity/templates/serviceaccount.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/verification-service/templates/daemonset.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/verification-service/templates/loadbalancer-service.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/charts/verification-service/templates/nodeport-service.yaml
 create mode 100644 cli/internal/helm/testdata/AWS/constellation-services/templates/.gitkeep
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/deployment.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/leader-election-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/manager-config.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/metrics-reader-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/metrics-service.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/proxy-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/deployment.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/leader-election-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/metrics-reader-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/metrics-service.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/proxy-rbac.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/selfsigned-issuer.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/serving-cert.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/validating-webhook-configuration.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/webhook-service.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/clusterrolebinding.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/openstack-daemonset.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/openstack-secret.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/serviceaccount.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/clusterrole.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/clusterrolebinding.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/configmap.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/daemonset.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/service.yaml
 create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/serviceaccount.yaml
 create mode 100644
cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/clusterrole.yaml create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/clusterrolebinding.yaml create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/daemonset.yaml create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/mastersecret.yaml create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/service.yaml create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/serviceaccount.yaml create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/konnectivity/templates/clusterrolebinding.yaml create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/konnectivity/templates/daemonset.yaml create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/konnectivity/templates/serviceaccount.yaml create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/verification-service/templates/daemonset.yaml create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/verification-service/templates/loadbalancer-service.yaml create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/charts/verification-service/templates/nodeport-service.yaml create mode 100644 cli/internal/helm/testdata/OpenStack/constellation-services/templates/.gitkeep diff --git a/cli/internal/helm/loader_test.go b/cli/internal/helm/loader_test.go index c3136a15e..ac3634c9b 100644 --- a/cli/internal/helm/loader_test.go +++ b/cli/internal/helm/loader_test.go @@ -59,16 +59,13 @@ func TestConstellationServices(t *testing.T) { ccmImage string cnmImage string }{ - "GCP": { + "AWS": { config: &config.Config{ - AttestationVariant: oid.GCPSEVES{}.String(), - Provider: config.ProviderConfig{GCP: &config.GCPConfig{ - DeployCSIDriver: toPtr(true), - }}, + AttestationVariant: oid.AWSNitroTPM{}.String(), + Provider: config.ProviderConfig{AWS: &config.AWSConfig{}}, }, - enforceIDKeyDigest: false, - valuesModifier: prepareGCPValues, - ccmImage: "ccmImageForGCP", + valuesModifier: prepareAWSValues, + ccmImage: "ccmImageForAWS", }, "Azure": { config: &config.Config{ @@ -83,13 +80,30 @@ func TestConstellationServices(t *testing.T) { ccmImage: "ccmImageForAzure", cnmImage: "cnmImageForAzure", }, + "GCP": { + config: &config.Config{ + AttestationVariant: oid.GCPSEVES{}.String(), + Provider: config.ProviderConfig{GCP: &config.GCPConfig{ + DeployCSIDriver: toPtr(true), + }}, + }, + valuesModifier: prepareGCPValues, + ccmImage: "ccmImageForGCP", + }, + "OpenStack": { + config: &config.Config{ + AttestationVariant: oid.Dummy{}.String(), + Provider: config.ProviderConfig{OpenStack: &config.OpenStackConfig{}}, + }, + valuesModifier: prepareOpenStackValues, + ccmImage: "ccmImageForOpenStack", + }, "QEMU": { config: &config.Config{ AttestationVariant: oid.QEMUVTPM{}.String(), Provider: config.ProviderConfig{QEMU: &config.QEMUConfig{}}, }, - enforceIDKeyDigest: false, - valuesModifier: prepareQEMUValues, + valuesModifier: prepareQEMUValues, }, } @@ -288,6 +302,88 @@ func buildTestdataMap(csp string, expectedData map[string]string, require *requi } } +func prepareAWSValues(values map[string]any) error { + joinVals, ok := values["join-service"].(map[string]any) + if !ok { + return 
errors.New("missing 'join-service' key") + } + m := measurements.M{1: measurements.WithAllBytes(0xAA, false)} + mJSON, err := json.Marshal(m) + if err != nil { + return err + } + joinVals["measurements"] = string(mJSON) + joinVals["measurementSalt"] = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + + ccmVals, ok := values["ccm"].(map[string]any) + if !ok { + return errors.New("missing 'ccm' key") + } + ccmVals["AWS"].(map[string]any)["subnetworkPodCIDR"] = "192.0.2.0/24" + + verificationVals, ok := values["verification-service"].(map[string]any) + if !ok { + return errors.New("missing 'verification-service' key") + } + verificationVals["loadBalancerIP"] = "127.0.0.1" + + konnectivityVals, ok := values["konnectivity"].(map[string]any) + if !ok { + return errors.New("missing 'konnectivity' key") + } + konnectivityVals["loadBalancerIP"] = "127.0.0.1" + + return nil +} + +func prepareAzureValues(values map[string]any) error { + joinVals, ok := values["join-service"].(map[string]any) + if !ok { + return errors.New("missing 'join-service' key") + } + joinVals["idkeydigests"] = "[\"baaaaaadbaaaaaadbaaaaaadbaaaaaadbaaaaaadbaaaaaadbaaaaaadbaaaaaad\", \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"]" + m := measurements.M{1: measurements.WithAllBytes(0xAA, false)} + mJSON, err := json.Marshal(m) + if err != nil { + return err + } + joinVals["measurements"] = string(mJSON) + joinVals["measurementSalt"] = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + + ccmVals, ok := values["ccm"].(map[string]any) + if !ok { + return errors.New("missing 'ccm' key") + } + ccmVals["Azure"].(map[string]any)["subnetworkPodCIDR"] = "192.0.2.0/24" + ccmVals["Azure"].(map[string]any)["azureConfig"] = "baaaaaad" + + autoscalerVals, ok := values["autoscaler"].(map[string]any) + if !ok { + return errors.New("missing 'autoscaler' key") + } + autoscalerVals["Azure"] = map[string]any{ + "clientID": "AppClientID", + "clientSecret": "ClientSecretValue", + "resourceGroup": "resourceGroup", + "subscriptionID": "subscriptionID", + "tenantID": "TenantID", + } + + verificationVals, ok := values["verification-service"].(map[string]any) + if !ok { + return errors.New("missing 'verification-service' key") + } + verificationVals["loadBalancerIP"] = "127.0.0.1" + + konnectivityVals, ok := values["konnectivity"].(map[string]any) + if !ok { + return errors.New("missing 'konnectivity' key") + } + konnectivityVals["loadBalancerIP"] = "127.0.0.1" + + return nil +} + func prepareGCPValues(values map[string]any) error { joinVals, ok := values["join-service"].(map[string]any) if !ok { @@ -365,12 +461,11 @@ func prepareGCPValues(values map[string]any) error { return nil } -func prepareAzureValues(values map[string]any) error { +func prepareOpenStackValues(values map[string]any) error { joinVals, ok := values["join-service"].(map[string]any) if !ok { return errors.New("missing 'join-service' key") } - joinVals["idkeydigests"] = "[\"baaaaaadbaaaaaadbaaaaaadbaaaaaadbaaaaaadbaaaaaadbaaaaaadbaaaaaad\", \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"]" m := measurements.M{1: measurements.WithAllBytes(0xAA, false)} mJSON, err := json.Marshal(m) if err != nil { @@ -383,20 +478,8 @@ func prepareAzureValues(values map[string]any) error { if !ok { return errors.New("missing 'ccm' key") } - ccmVals["Azure"].(map[string]any)["subnetworkPodCIDR"] = "192.0.2.0/24" - ccmVals["Azure"].(map[string]any)["azureConfig"] = "baaaaaad" - - autoscalerVals, ok := values["autoscaler"].(map[string]any) - if !ok { - return 
errors.New("missing 'autoscaler' key") - } - autoscalerVals["Azure"] = map[string]any{ - "clientID": "AppClientID", - "clientSecret": "ClientSecretValue", - "resourceGroup": "resourceGroup", - "subscriptionID": "subscriptionID", - "tenantID": "TenantID", - } + ccmVals["OpenStack"].(map[string]any)["subnetworkPodCIDR"] = "192.0.2.0/24" + ccmVals["OpenStack"].(map[string]any)["secretData"] = "baaaaaad" verificationVals, ok := values["verification-service"].(map[string]any) if !ok { diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/deployment.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/deployment.yaml new file mode 100644 index 000000000..177754d78 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/deployment.yaml @@ -0,0 +1,150 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: constellation-operator-controller-manager + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: constellation-operator-controller-manager + namespace: testNamespace + labels: + control-plane: controller-manager + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + template: + metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + annotations: + kubectl.kubernetes.io/default-container: manager + spec: + containers: + - args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=0 + env: + - name: KUBERNETES_CLUSTER_DOMAIN + value: cluster.local + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1 + name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + protocol: TCP + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + - args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=127.0.0.1:8080 + - --leader-elect + command: + - /ko-app/v2 + env: + - name: KUBERNETES_CLUSTER_DOMAIN + value: cluster.local + - name: CONSTEL_CSP + value: GCP + - name: constellation-uid + value: "42424242424242" + image: constellationOperatorImage + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /etc/kubernetes/pki/etcd + name: etcd-certs + - mountPath: /host/usr/lib/os-release + name: usr-lib-os-release + - mountPath: /etc/os-release + name: etc-os-release + - mountPath: /etc/azure + name: azureconfig + readOnly: true + - mountPath: /etc/gce + name: gceconf + readOnly: true + - mountPath: /etc/constellation-upgrade-agent.sock + name: upgrade-agent-socket + readOnly: true + nodeSelector: + 
node-role.kubernetes.io/control-plane: "" + securityContext: + runAsUser: 0 + serviceAccountName: constellation-operator-controller-manager + terminationGracePeriodSeconds: 10 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + volumes: + - hostPath: + path: /etc/kubernetes/pki/etcd + type: Directory + name: etcd-certs + - hostPath: + path: /usr/lib/os-release + type: File + name: usr-lib-os-release + - hostPath: + path: /etc/os-release + type: File + name: etc-os-release + - name: azureconfig + secret: + optional: true + secretName: azureconfig + - configMap: + name: gceconf + optional: true + name: gceconf + - name: upgrade-agent-socket + hostPath: + path: /run/constellation-upgrade-agent.sock + type: Socket diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/leader-election-rbac.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/leader-election-rbac.yaml new file mode 100644 index 000000000..4fe9fe6c3 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/leader-election-rbac.yaml @@ -0,0 +1,61 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: constellation-operator-leader-election-role + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: constellation-operator-leader-election-rolebinding + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: 'constellation-operator-leader-election-role' +subjects: +- kind: ServiceAccount + name: 'constellation-operator-controller-manager' + namespace: 'testNamespace' diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/manager-config.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/manager-config.yaml new file mode 100644 index 000000000..aab7eb462 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/manager-config.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: constellation-operator-manager-config + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +data: + controller_manager_config.yaml: | + apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 + health: + healthProbeBindAddress: ":8081" + kind: ControllerManagerConfig + leaderElection: + leaderElect: true + resourceName: 
"38cc1645.edgeless.systems" + metrics: + bindAddress: "127.0.0.1:8080" + webhook: + port: 9443 diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml new file mode 100644 index 000000000..4fa4863c8 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml @@ -0,0 +1,222 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: constellation-operator-manager-role + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list +- apiGroups: + - "" + resources: + - nodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - get +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - nodemaintenance.medik8s.io + resources: + - nodemaintenances + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - autoscalingstrategies + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - autoscalingstrategies/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - autoscalingstrategies/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - nodeversion + verbs: + - get + - list + - watch +- apiGroups: + - update.edgeless.systems + resources: + - nodeversion/status + verbs: + - get +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/status + verbs: + - get + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 
+kind: ClusterRoleBinding +metadata: + name: constellation-operator-manager-rolebinding + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'constellation-operator-manager-role' +subjects: +- kind: ServiceAccount + name: 'constellation-operator-controller-manager' + namespace: 'testNamespace' diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/metrics-reader-rbac.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/metrics-reader-rbac.yaml new file mode 100644 index 000000000..8caefc5c0 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/metrics-reader-rbac.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: constellation-operator-metrics-reader + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- nonResourceURLs: + - /metrics + verbs: + - get diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/metrics-service.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/metrics-service.yaml new file mode 100644 index 000000000..62985a195 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/metrics-service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: constellation-operator-controller-manager-metrics-service + namespace: testNamespace + labels: + control-plane: controller-manager + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + control-plane: controller-manager + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/proxy-rbac.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/proxy-rbac.yaml new file mode 100644 index 000000000..d5cf87e91 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/proxy-rbac.yaml @@ -0,0 +1,42 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: constellation-operator-proxy-role + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: constellation-operator-proxy-rolebinding + namespace: testNamespace + labels: + 
helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'constellation-operator-proxy-role' +subjects: +- kind: ServiceAccount + name: 'constellation-operator-controller-manager' + namespace: 'testNamespace' diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/deployment.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/deployment.yaml new file mode 100644 index 000000000..54642ad37 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/deployment.yaml @@ -0,0 +1,112 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: node-maintenance-operator-controller-manager + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: node-maintenance-operator-controller-manager + namespace: testNamespace + labels: + control-plane: controller-manager + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + node-maintenance-operator: "" + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + template: + metadata: + labels: + control-plane: controller-manager + node-maintenance-operator: "" + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + annotations: + kubectl.kubernetes.io/default-container: manager + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + containers: + - args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8080 + - --leader-elect + command: + - /manager + env: + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: KUBERNETES_CLUSTER_DOMAIN + value: cluster.local + image: nodeMaintenanceOperatorImage + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 200m + memory: 100Mi + requests: + cpu: 100m + memory: 20Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + priorityClassName: system-cluster-critical + securityContext: + runAsNonRoot: true + serviceAccountName: node-maintenance-operator-controller-manager + terminationGracePeriodSeconds: 10 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + volumes: + - name: 
cert + secret: + defaultMode: 420 + secretName: webhook-server-cert diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/leader-election-rbac.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/leader-election-rbac.yaml new file mode 100644 index 000000000..7c34b1a5d --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/leader-election-rbac.yaml @@ -0,0 +1,63 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: node-maintenance-operator-leader-election-role + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: node-maintenance-operator-leader-election-rolebinding + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: 'node-maintenance-operator-leader-election-role' +subjects: +- kind: ServiceAccount + name: 'node-maintenance-operator-controller-manager' + namespace: 'testNamespace' diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml new file mode 100644 index 000000000..fa9f582a4 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml @@ -0,0 +1,131 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: node-maintenance-operator-manager-role + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create +- apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - get +- apiGroups: + - nodemaintenance.medik8s.io + resources: + - nodemaintenances + verbs: + - create + - delete + - get + - list + - patch + - update + - watch 
+- apiGroups: + - nodemaintenance.medik8s.io + resources: + - nodemaintenances/finalizers + verbs: + - update +- apiGroups: + - nodemaintenance.medik8s.io + resources: + - nodemaintenances/status + verbs: + - get + - patch + - update +- apiGroups: + - oauth.openshift.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: node-maintenance-operator-manager-rolebinding + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'node-maintenance-operator-manager-role' +subjects: +- kind: ServiceAccount + name: 'node-maintenance-operator-controller-manager' + namespace: 'testNamespace' diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/metrics-reader-rbac.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/metrics-reader-rbac.yaml new file mode 100644 index 000000000..9634d0552 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/metrics-reader-rbac.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: node-maintenance-operator-metrics-reader + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- nonResourceURLs: + - /metrics + verbs: + - get diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/metrics-service.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/metrics-service.yaml new file mode 100644 index 000000000..256c4f228 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/metrics-service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: node-maintenance-operator-controller-manager-metrics-service + namespace: testNamespace + labels: + control-plane: controller-manager + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + control-plane: controller-manager + node-maintenance-operator: "" + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/proxy-rbac.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/proxy-rbac.yaml new file mode 100644 index 000000000..97f50698a --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/proxy-rbac.yaml @@ -0,0 +1,44 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
ClusterRole +metadata: + name: node-maintenance-operator-proxy-role + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: node-maintenance-operator-proxy-rolebinding + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'node-maintenance-operator-proxy-role' +subjects: +- kind: ServiceAccount + name: 'node-maintenance-operator-controller-manager' + namespace: 'testNamespace' diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/selfsigned-issuer.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/selfsigned-issuer.yaml new file mode 100644 index 000000000..6776e2c0b --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/selfsigned-issuer.yaml @@ -0,0 +1,12 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: node-maintenance-operator-selfsigned-issuer + namespace: testNamespace + labels: + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +spec: + selfSigned: {} diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/serving-cert.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/serving-cert.yaml new file mode 100644 index 000000000..954fb8969 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/serving-cert.yaml @@ -0,0 +1,18 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: node-maintenance-operator-serving-cert + namespace: testNamespace + labels: + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +spec: + dnsNames: + - 'node-maintenance-operator-webhook-service.testNamespace.svc' + - 'node-maintenance-operator-webhook-service.testNamespace.svc.cluster.local' + issuerRef: + kind: Issuer + name: node-maintenance-operator-selfsigned-issuer + secretName: webhook-server-cert diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/validating-webhook-configuration.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/validating-webhook-configuration.yaml new file mode 100644 index 000000000..ec928916f --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/validating-webhook-configuration.yaml @@ -0,0 +1,34 @@ +apiVersion: admissionregistration.k8s.io/v1 
+kind: ValidatingWebhookConfiguration +metadata: + name: node-maintenance-operator-validating-webhook-configuration + namespace: testNamespace + annotations: + cert-manager.io/inject-ca-from: testNamespace/node-maintenance-operator-serving-cert + labels: + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: node-maintenance-operator-webhook-service + namespace: testNamespace + path: /validate-nodemaintenance-medik8s-io-v1beta1-nodemaintenance + failurePolicy: Fail + name: vnodemaintenance.kb.io + rules: + - apiGroups: + - nodemaintenance.medik8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - nodemaintenances + sideEffects: None + timeoutSeconds: 15 diff --git a/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/webhook-service.yaml b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/webhook-service.yaml new file mode 100644 index 000000000..c529bd6c9 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/webhook-service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: node-maintenance-operator-webhook-service + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + control-plane: controller-manager + node-maintenance-operator: "" + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + ports: + - port: 443 + protocol: TCP + targetPort: 9443 diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/aws-deployment.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/aws-deployment.yaml new file mode 100644 index 000000000..c9c26fe9b --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/aws-deployment.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: constellation-cluster-autoscaler + namespace: testNamespace + labels: + app.kubernetes.io/instance: constellation + app.kubernetes.io/managed-by: Constellation + app.kubernetes.io/name: cluster-autoscaler +spec: + replicas: 0 + selector: + matchLabels: + app.kubernetes.io/instance: constellation + app.kubernetes.io/name: cluster-autoscaler + template: + metadata: + labels: + app.kubernetes.io/instance: constellation + app.kubernetes.io/name: cluster-autoscaler + spec: + containers: + - name: cluster-autoscaler + image: autoscalerImage + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /health-check + port: 8085 + ports: + - containerPort: 8085 + resources: {} + dnsPolicy: ClusterFirst + nodeSelector: + node-role.kubernetes.io/control-plane: "" + priorityClassName: system-cluster-critical + serviceAccountName: constellation-cluster-autoscaler + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + 
operator: Equal + value: "true" diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/clusterrole.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/clusterrole.yaml new file mode 100644 index 000000000..501c719d5 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/clusterrole.yaml @@ -0,0 +1,138 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: constellation-cluster-autoscaler + labels: + app.kubernetes.io/instance: constellation + app.kubernetes.io/managed-by: Constellation + app.kubernetes.io/name: cluster-autoscaler +rules: +- apiGroups: + - "" + resources: + - events + - endpoints + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create +- apiGroups: + - "" + resources: + - pods/status + verbs: + - update +- apiGroups: + - "" + resourceNames: + - cluster-autoscaler + resources: + - endpoints + verbs: + - get + - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - watch + - list + - get + - update +- apiGroups: + - "" + resources: + - namespaces + - pods + - services + - replicationcontrollers + - persistentvolumeclaims + - persistentvolumes + verbs: + - watch + - list + - get +- apiGroups: + - batch + resources: + - jobs + - cronjobs + verbs: + - watch + - list + - get +- apiGroups: + - batch + - extensions + resources: + - jobs + verbs: + - get + - list + - patch + - watch +- apiGroups: + - extensions + resources: + - replicasets + - daemonsets + verbs: + - watch + - list + - get +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - watch + - list +- apiGroups: + - apps + resources: + - daemonsets + - replicasets + - statefulsets + verbs: + - watch + - list + - get +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + - csidrivers + - csistoragecapacities + verbs: + - watch + - list + - get +- apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resourceNames: + - cluster-autoscaler + resources: + - leases + verbs: + - get + - update diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/clusterrolebinding.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..f2d509015 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/clusterrolebinding.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: constellation-cluster-autoscaler + namespace: testNamespace + labels: + app.kubernetes.io/instance: constellation + app.kubernetes.io/managed-by: Constellation + app.kubernetes.io/name: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: constellation-cluster-autoscaler +subjects: +- kind: ServiceAccount + name: constellation-cluster-autoscaler + namespace: testNamespace diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/poddisruptionbudget.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/poddisruptionbudget.yaml new file mode 100644 index 000000000..3c474b05d --- /dev/null +++ 
b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/poddisruptionbudget.yaml @@ -0,0 +1,15 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: constellation-cluster-autoscaler + namespace: testNamespace + labels: + app.kubernetes.io/instance: constellation + app.kubernetes.io/managed-by: Constellation + app.kubernetes.io/name: cluster-autoscaler +spec: + maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/instance: constellation + app.kubernetes.io/name: cluster-autoscaler diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/role.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/role.yaml new file mode 100644 index 000000000..e5cdd8ae7 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/role.yaml @@ -0,0 +1,26 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: constellation-cluster-autoscaler + namespace: testNamespace + labels: + app.kubernetes.io/instance: constellation + app.kubernetes.io/managed-by: Constellation + app.kubernetes.io/name: cluster-autoscaler +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create +- apiGroups: + - "" + resourceNames: + - cluster-autoscaler-status + resources: + - configmaps + verbs: + - delete + - get + - update diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/rolebinding.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/rolebinding.yaml new file mode 100644 index 000000000..ef6f15262 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/rolebinding.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: constellation-cluster-autoscaler + namespace: testNamespace + labels: + app.kubernetes.io/instance: constellation + app.kubernetes.io/managed-by: Constellation + app.kubernetes.io/name: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: constellation-cluster-autoscaler +subjects: +- kind: ServiceAccount + name: constellation-cluster-autoscaler + namespace: testNamespace diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/service.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/service.yaml new file mode 100644 index 000000000..84d228ff2 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: constellation-cluster-autoscaler + namespace: testNamespace + labels: + app.kubernetes.io/instance: constellation + app.kubernetes.io/managed-by: Constellation + app.kubernetes.io/name: cluster-autoscaler +spec: + ports: + - name: http + port: 8085 + protocol: TCP + targetPort: 8085 + selector: + app.kubernetes.io/instance: constellation + app.kubernetes.io/name: cluster-autoscaler + type: ClusterIP diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/serviceaccount.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/serviceaccount.yaml new file mode 100644 index 000000000..79634e292 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/autoscaler/templates/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: 
v1 +kind: ServiceAccount +metadata: + name: constellation-cluster-autoscaler + namespace: testNamespace + labels: + app.kubernetes.io/instance: constellation + app.kubernetes.io/managed-by: Constellation + app.kubernetes.io/name: cluster-autoscaler +automountServiceAccountToken: true diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/ccm/templates/aws-daemonset.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/ccm/templates/aws-daemonset.yaml new file mode 100644 index 000000000..23917ceef --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/ccm/templates/aws-daemonset.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloud-controller-manager + namespace: testNamespace + labels: + k8s-app: cloud-controller-manager +spec: + selector: + matchLabels: + k8s-app: cloud-controller-manager + template: + metadata: + labels: + k8s-app: cloud-controller-manager + spec: + containers: + - name: cloud-controller-manager + image: ccmImageForAWS + args: + - --cloud-provider=aws + - --leader-elect=true + - --allocate-node-cidrs=false + - --configure-cloud-routes=false + - -v=2 + volumeMounts: + - mountPath: /etc/kubernetes + name: etckubernetes + readOnly: true + - mountPath: /etc/ssl + name: etcssl + readOnly: true + - mountPath: /etc/pki + name: etcpki + readOnly: true + resources: {} + serviceAccountName: cloud-controller-manager + nodeSelector: + node-role.kubernetes.io/control-plane: "" + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + volumes: + - name: etckubernetes + hostPath: + path: /etc/kubernetes + - name: etcssl + hostPath: + path: /etc/ssl + - name: etcpki + hostPath: + path: /etc/pki + updateStrategy: {} diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/ccm/templates/clusterrolebinding.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/ccm/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..8624b04ee --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/ccm/templates/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloud-controller-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: testNamespace diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/ccm/templates/serviceaccount.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/ccm/templates/serviceaccount.yaml new file mode 100644 index 000000000..4b924605f --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/ccm/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-controller-manager + namespace: testNamespace diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/clusterrole.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/clusterrole.yaml new file mode 100644 index 000000000..1ead07241 --- /dev/null +++ 
b/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/clusterrole.yaml @@ -0,0 +1,45 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: join-service + name: join-service +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - create + - update +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - create + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get +- apiGroups: + - "update.edgeless.systems" + resources: + - joiningnodes + verbs: + - get + - create + - update + - patch +- apiGroups: + - "update.edgeless.systems" + resources: + - nodeversions + verbs: + - get diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/clusterrolebinding.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..6e668f86b --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: join-service +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: join-service +subjects: +- kind: ServiceAccount + name: join-service + namespace: testNamespace diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/configmap.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/configmap.yaml new file mode 100644 index 000000000..4c445457a --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: join-config + namespace: testNamespace +data: + measurements: "{\"1\":{\"expected\":\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\",\"warnOnly\":false}}" +binaryData: + measurementSalt: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/daemonset.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/daemonset.yaml new file mode 100644 index 000000000..2156f82a6 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/daemonset.yaml @@ -0,0 +1,67 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: join-service + namespace: testNamespace + labels: + component: join-service + k8s-app: join-service + kubernetes.io/cluster-service: "true" +spec: + selector: + matchLabels: + k8s-app: join-service + template: + metadata: + labels: + k8s-app: join-service + spec: + priorityClassName: system-cluster-critical + serviceAccountName: join-service + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + nodeSelector: + node-role.kubernetes.io/control-plane: "" + containers: + - name: join-service + image: joinServiceImage + args: + - --cloud-provider=AWS + - --key-service-endpoint=key-service.testNamespace:9000 + - --attestation-variant=aws-nitro-tpm + volumeMounts: + - mountPath: /var/config + name: 
config + readOnly: true + - mountPath: /etc/kubernetes + name: kubeadm + readOnly: true + ports: + - containerPort: 9090 + name: tcp + resources: {} + securityContext: + privileged: true + volumes: + - name: config + projected: + sources: + - configMap: + name: join-config + - configMap: + name: internal-config + - name: kubeadm + hostPath: + path: /etc/kubernetes + updateStrategy: {} diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/service.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/service.yaml new file mode 100644 index 000000000..32bb4b31b --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: join-service + namespace: testNamespace +spec: + type: NodePort + selector: + k8s-app: join-service + ports: + - name: grpc + protocol: TCP + port: 9090 + targetPort: 9090 + nodePort: 30090 +status: + loadBalancer: {} diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/serviceaccount.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/serviceaccount.yaml new file mode 100644 index 000000000..fd9b52173 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/join-service/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: join-service + namespace: testNamespace diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/clusterrole.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/clusterrole.yaml new file mode 100644 index 000000000..da6b91c99 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/clusterrole.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: key-service + name: key-service +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/clusterrolebinding.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..0e3a0b6a8 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: key-service +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: key-service +subjects: + - kind: ServiceAccount + name: key-service + namespace: testNamespace diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/daemonset.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/daemonset.yaml new file mode 100644 index 000000000..e86cd502b --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/daemonset.yaml @@ -0,0 +1,62 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + component: key-service + k8s-app: key-service + kubernetes.io/cluster-service: "true" + name: key-service + namespace: testNamespace +spec: + selector: + matchLabels: + k8s-app: key-service + template: + metadata: + labels: + k8s-app: key-service + spec: + 
containers: + - name: key-service + image: keyServiceImage + args: + - --port=9000 + volumeMounts: + - mountPath: /var/config + name: config + readOnly: true + resources: {} + nodeSelector: + node-role.kubernetes.io/control-plane: "" + priorityClassName: system-cluster-critical + serviceAccountName: key-service + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - name: config + projected: + sources: + - configMap: + items: + - key: measurements + path: measurements + name: join-config + - secret: + items: + - key: mastersecret + path: mastersecret + - key: salt + path: salt + name: constellation-mastersecret + updateStrategy: {} diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/mastersecret.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/mastersecret.yaml new file mode 100644 index 000000000..231c4329f --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/mastersecret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: constellation-mastersecret + namespace: testNamespace +data: + mastersecret: YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWE= + salt: YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWE= diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/service.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/service.yaml new file mode 100644 index 000000000..e381d7f44 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: key-service + namespace: testNamespace +spec: + ports: + - name: grpc + port: 9000 + protocol: TCP + targetPort: 9000 + selector: + k8s-app: key-service + type: ClusterIP +status: + loadBalancer: {} diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/serviceaccount.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/serviceaccount.yaml new file mode 100644 index 000000000..5659d4efa --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/key-service/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: key-service + namespace: testNamespace diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/konnectivity/templates/clusterrolebinding.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/konnectivity/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..f189cb6a3 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/konnectivity/templates/clusterrolebinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/cluster-service: "true" + name: system:konnectivity-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:konnectivity-server 
diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/konnectivity/templates/daemonset.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/konnectivity/templates/daemonset.yaml new file mode 100644 index 000000000..0f26cfbb9 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/konnectivity/templates/daemonset.yaml @@ -0,0 +1,76 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: konnectivity-agent + name: konnectivity-agent + namespace: testNamespace +spec: + selector: + matchLabels: + k8s-app: konnectivity-agent + template: + metadata: + labels: + k8s-app: konnectivity-agent + spec: + containers: + - args: + - --logtostderr=true + - --proxy-server-host=127.0.0.1 + - --ca-cert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - --proxy-server-port=8132 + - --admin-server-port=8133 + - --health-server-port=8134 + - --service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token + - --agent-identifiers=host=$(HOST_IP) + - --sync-forever=true + - --keepalive-time=60m + - --sync-interval=5s + - --sync-interval-cap=30s + - --probe-interval=5s + - --v=3 + command: + - /proxy-agent + env: + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + image: konnectivityImage + livenessProbe: + httpGet: + path: /healthz + port: 8134 + initialDelaySeconds: 15 + timeoutSeconds: 15 + name: konnectivity-agent + resources: {} + volumeMounts: + - mountPath: /var/run/secrets/tokens + name: konnectivity-agent-token + readOnly: true + priorityClassName: system-cluster-critical + serviceAccountName: konnectivity-agent + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + volumes: + - name: konnectivity-agent-token + projected: + sources: + - serviceAccountToken: + audience: system:konnectivity-server + path: konnectivity-agent-token + updateStrategy: {} diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/konnectivity/templates/serviceaccount.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/konnectivity/templates/serviceaccount.yaml new file mode 100644 index 000000000..ad307c56f --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/konnectivity/templates/serviceaccount.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/cluster-service: "true" + name: konnectivity-agent + namespace: testNamespace diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/verification-service/templates/daemonset.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/verification-service/templates/daemonset.yaml new file mode 100644 index 000000000..cef54d4dd --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/verification-service/templates/daemonset.yaml @@ -0,0 +1,51 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + component: verification-service + k8s-app: verification-service + name: verification-service + namespace: testNamespace +spec: + selector: + matchLabels: + k8s-app: verification-service + template: + metadata: + labels: + k8s-app: verification-service + spec: + 
containers: + - args: + - --attestation-variant=aws-nitro-tpm + image: verificationImage + name: verification-service + ports: + - containerPort: 8080 + name: http + - containerPort: 9090 + name: grpc + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /sys/kernel/security/ + name: event-log + readOnly: true + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Equal + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /sys/kernel/security/ + name: event-log + updateStrategy: {} diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/verification-service/templates/loadbalancer-service.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/verification-service/templates/loadbalancer-service.yaml new file mode 100644 index 000000000..d76218868 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/verification-service/templates/loadbalancer-service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: verify + namespace: testNamespace +spec: + allocateLoadBalancerNodePorts: false + externalIPs: + - 127.0.0.1 + loadBalancerClass: constellation + ports: + - name: grpc + port: 30081 + protocol: TCP + targetPort: 9090 + selector: + k8s-app: verification-service + type: LoadBalancer diff --git a/cli/internal/helm/testdata/AWS/constellation-services/charts/verification-service/templates/nodeport-service.yaml b/cli/internal/helm/testdata/AWS/constellation-services/charts/verification-service/templates/nodeport-service.yaml new file mode 100644 index 000000000..8b9bb90f1 --- /dev/null +++ b/cli/internal/helm/testdata/AWS/constellation-services/charts/verification-service/templates/nodeport-service.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + name: verification-service + namespace: testNamespace +spec: + ports: + - name: http + nodePort: 30080 + port: 8080 + protocol: TCP + targetPort: 8080 + - name: grpc + nodePort: 30081 + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + k8s-app: verification-service + type: NodePort diff --git a/cli/internal/helm/testdata/AWS/constellation-services/templates/.gitkeep b/cli/internal/helm/testdata/AWS/constellation-services/templates/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/deployment.yaml b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/deployment.yaml new file mode 100644 index 000000000..177754d78 --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/deployment.yaml @@ -0,0 +1,150 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: constellation-operator-controller-manager + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: constellation-operator-controller-manager + namespace: testNamespace + labels: + control-plane: controller-manager + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + 
app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + template: + metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + annotations: + kubectl.kubernetes.io/default-container: manager + spec: + containers: + - args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=0 + env: + - name: KUBERNETES_CLUSTER_DOMAIN + value: cluster.local + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1 + name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + protocol: TCP + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + - args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=127.0.0.1:8080 + - --leader-elect + command: + - /ko-app/v2 + env: + - name: KUBERNETES_CLUSTER_DOMAIN + value: cluster.local + - name: CONSTEL_CSP + value: GCP + - name: constellation-uid + value: "42424242424242" + image: constellationOperatorImage + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /etc/kubernetes/pki/etcd + name: etcd-certs + - mountPath: /host/usr/lib/os-release + name: usr-lib-os-release + - mountPath: /etc/os-release + name: etc-os-release + - mountPath: /etc/azure + name: azureconfig + readOnly: true + - mountPath: /etc/gce + name: gceconf + readOnly: true + - mountPath: /etc/constellation-upgrade-agent.sock + name: upgrade-agent-socket + readOnly: true + nodeSelector: + node-role.kubernetes.io/control-plane: "" + securityContext: + runAsUser: 0 + serviceAccountName: constellation-operator-controller-manager + terminationGracePeriodSeconds: 10 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + volumes: + - hostPath: + path: /etc/kubernetes/pki/etcd + type: Directory + name: etcd-certs + - hostPath: + path: /usr/lib/os-release + type: File + name: usr-lib-os-release + - hostPath: + path: /etc/os-release + type: File + name: etc-os-release + - name: azureconfig + secret: + optional: true + secretName: azureconfig + - configMap: + name: gceconf + optional: true + name: gceconf + - name: upgrade-agent-socket + hostPath: + path: /run/constellation-upgrade-agent.sock + type: Socket diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/leader-election-rbac.yaml b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/leader-election-rbac.yaml new file mode 100644 index 000000000..4fe9fe6c3 --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/leader-election-rbac.yaml @@ -0,0 +1,61 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: constellation-operator-leader-election-role + namespace: testNamespace + labels: + helm.sh/chart: 
constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: constellation-operator-leader-election-rolebinding + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: 'constellation-operator-leader-election-role' +subjects: +- kind: ServiceAccount + name: 'constellation-operator-controller-manager' + namespace: 'testNamespace' diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/manager-config.yaml b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/manager-config.yaml new file mode 100644 index 000000000..aab7eb462 --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/manager-config.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: constellation-operator-manager-config + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +data: + controller_manager_config.yaml: | + apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 + health: + healthProbeBindAddress: ":8081" + kind: ControllerManagerConfig + leaderElection: + leaderElect: true + resourceName: "38cc1645.edgeless.systems" + metrics: + bindAddress: "127.0.0.1:8080" + webhook: + port: 9443 diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml new file mode 100644 index 000000000..4fa4863c8 --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml @@ -0,0 +1,222 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: constellation-operator-manager-role + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list +- apiGroups: + - "" + resources: + - nodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - get +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - nodemaintenance.medik8s.io + resources: + - nodemaintenances + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + 
resources: + - autoscalingstrategies + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - autoscalingstrategies/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - autoscalingstrategies/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - nodeversion + verbs: + - get + - list + - watch +- apiGroups: + - update.edgeless.systems + resources: + - nodeversion/status + verbs: + - get +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/status + verbs: + - get + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: constellation-operator-manager-rolebinding + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'constellation-operator-manager-role' +subjects: +- kind: ServiceAccount + name: 'constellation-operator-controller-manager' + namespace: 'testNamespace' diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/metrics-reader-rbac.yaml b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/metrics-reader-rbac.yaml new file mode 100644 index 000000000..8caefc5c0 --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/metrics-reader-rbac.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: constellation-operator-metrics-reader + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- nonResourceURLs: + - /metrics + verbs: + - get diff --git 
a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/metrics-service.yaml b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/metrics-service.yaml new file mode 100644 index 000000000..62985a195 --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/metrics-service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: constellation-operator-controller-manager-metrics-service + namespace: testNamespace + labels: + control-plane: controller-manager + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + control-plane: controller-manager + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/proxy-rbac.yaml b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/proxy-rbac.yaml new file mode 100644 index 000000000..d5cf87e91 --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/proxy-rbac.yaml @@ -0,0 +1,42 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: constellation-operator-proxy-role + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: constellation-operator-proxy-rolebinding + namespace: testNamespace + labels: + helm.sh/chart: constellation-operator-0.0.0 + app.kubernetes.io/name: constellation-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'constellation-operator-proxy-role' +subjects: +- kind: ServiceAccount + name: 'constellation-operator-controller-manager' + namespace: 'testNamespace' diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/deployment.yaml b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/deployment.yaml new file mode 100644 index 000000000..54642ad37 --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/deployment.yaml @@ -0,0 +1,112 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: node-maintenance-operator-controller-manager + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: node-maintenance-operator-controller-manager + namespace: testNamespace + labels: + 
control-plane: controller-manager + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + node-maintenance-operator: "" + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + template: + metadata: + labels: + control-plane: controller-manager + node-maintenance-operator: "" + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + annotations: + kubectl.kubernetes.io/default-container: manager + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + containers: + - args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8080 + - --leader-elect + command: + - /manager + env: + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: KUBERNETES_CLUSTER_DOMAIN + value: cluster.local + image: nodeMaintenanceOperatorImage + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 200m + memory: 100Mi + requests: + cpu: 100m + memory: 20Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + priorityClassName: system-cluster-critical + securityContext: + runAsNonRoot: true + serviceAccountName: node-maintenance-operator-controller-manager + terminationGracePeriodSeconds: 10 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: webhook-server-cert diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/leader-election-rbac.yaml b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/leader-election-rbac.yaml new file mode 100644 index 000000000..7c34b1a5d --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/leader-election-rbac.yaml @@ -0,0 +1,63 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: node-maintenance-operator-leader-election-role + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
RoleBinding +metadata: + name: node-maintenance-operator-leader-election-rolebinding + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: 'node-maintenance-operator-leader-election-role' +subjects: +- kind: ServiceAccount + name: 'node-maintenance-operator-controller-manager' + namespace: 'testNamespace' diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml new file mode 100644 index 000000000..fa9f582a4 --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml @@ -0,0 +1,131 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: node-maintenance-operator-manager-role + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create +- apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - get +- apiGroups: + - nodemaintenance.medik8s.io + resources: + - nodemaintenances + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - nodemaintenance.medik8s.io + resources: + - nodemaintenances/finalizers + verbs: + - update +- apiGroups: + - nodemaintenance.medik8s.io + resources: + - nodemaintenances/status + verbs: + - get + - patch + - update +- apiGroups: + - oauth.openshift.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: node-maintenance-operator-manager-rolebinding + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'node-maintenance-operator-manager-role' +subjects: +- kind: ServiceAccount + name: 'node-maintenance-operator-controller-manager' + namespace: 'testNamespace' diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/metrics-reader-rbac.yaml 
b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/metrics-reader-rbac.yaml new file mode 100644 index 000000000..9634d0552 --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/metrics-reader-rbac.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: node-maintenance-operator-metrics-reader + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- nonResourceURLs: + - /metrics + verbs: + - get diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/metrics-service.yaml b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/metrics-service.yaml new file mode 100644 index 000000000..256c4f228 --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/metrics-service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: node-maintenance-operator-controller-manager-metrics-service + namespace: testNamespace + labels: + control-plane: controller-manager + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + control-plane: controller-manager + node-maintenance-operator: "" + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/proxy-rbac.yaml b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/proxy-rbac.yaml new file mode 100644 index 000000000..97f50698a --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/proxy-rbac.yaml @@ -0,0 +1,44 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: node-maintenance-operator-proxy-role + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: node-maintenance-operator-proxy-rolebinding + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'node-maintenance-operator-proxy-role' +subjects: +- kind: ServiceAccount + name: 'node-maintenance-operator-controller-manager' + namespace: 
'testNamespace' diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/selfsigned-issuer.yaml b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/selfsigned-issuer.yaml new file mode 100644 index 000000000..6776e2c0b --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/selfsigned-issuer.yaml @@ -0,0 +1,12 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: node-maintenance-operator-selfsigned-issuer + namespace: testNamespace + labels: + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +spec: + selfSigned: {} diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/serving-cert.yaml b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/serving-cert.yaml new file mode 100644 index 000000000..954fb8969 --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/serving-cert.yaml @@ -0,0 +1,18 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: node-maintenance-operator-serving-cert + namespace: testNamespace + labels: + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +spec: + dnsNames: + - 'node-maintenance-operator-webhook-service.testNamespace.svc' + - 'node-maintenance-operator-webhook-service.testNamespace.svc.cluster.local' + issuerRef: + kind: Issuer + name: node-maintenance-operator-selfsigned-issuer + secretName: webhook-server-cert diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/validating-webhook-configuration.yaml b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/validating-webhook-configuration.yaml new file mode 100644 index 000000000..ec928916f --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/validating-webhook-configuration.yaml @@ -0,0 +1,34 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: node-maintenance-operator-validating-webhook-configuration + namespace: testNamespace + annotations: + cert-manager.io/inject-ca-from: testNamespace/node-maintenance-operator-serving-cert + labels: + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: node-maintenance-operator-webhook-service + namespace: testNamespace + path: /validate-nodemaintenance-medik8s-io-v1beta1-nodemaintenance + failurePolicy: Fail + name: vnodemaintenance.kb.io + rules: + - apiGroups: + - nodemaintenance.medik8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - nodemaintenances + sideEffects: None + timeoutSeconds: 15 diff --git a/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/webhook-service.yaml 
b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/webhook-service.yaml new file mode 100644 index 000000000..c529bd6c9 --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/webhook-service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: node-maintenance-operator-webhook-service + namespace: testNamespace + labels: + node-maintenance-operator: "" + helm.sh/chart: node-maintenance-operator-0.0.0 + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + control-plane: controller-manager + node-maintenance-operator: "" + app.kubernetes.io/name: node-maintenance-operator + app.kubernetes.io/instance: testRelease + ports: + - port: 443 + protocol: TCP + targetPort: 9443 diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/clusterrolebinding.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..8624b04ee --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloud-controller-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: testNamespace diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/openstack-daemonset.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/openstack-daemonset.yaml new file mode 100644 index 000000000..2beeb4871 --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/openstack-daemonset.yaml @@ -0,0 +1,68 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloud-controller-manager + namespace: testNamespace + labels: + k8s-app: cloud-controller-manager +spec: + selector: + matchLabels: + k8s-app: cloud-controller-manager + template: + metadata: + labels: + k8s-app: cloud-controller-manager + spec: + containers: + - name: cloud-controller-manager + image: ccmImageForOpenStack + args: + - /bin/openstack-cloud-controller-manager + - --cloud-provider=openstack + - --cloud-config=/etc/config/cloud.conf + - --leader-elect=true + - --allocate-node-cidrs=false + - -v=2 + volumeMounts: + - name: etckubernetes + mountPath: /etc/kubernetes + readOnly: true + - name: etcssl + mountPath: /etc/ssl + readOnly: true + - name: etcpki + mountPath: /etc/pki + readOnly: true + - name: etcconfig + mountPath: /etc/config + readOnly: true + resources: {} + serviceAccountName: cloud-controller-manager + nodeSelector: + node-role.kubernetes.io/control-plane: "" + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + volumes: + - name: etckubernetes + hostPath: + path: /etc/kubernetes + - name: etcssl + hostPath: + path: /etc/ssl + - name: etcpki + hostPath: + path: /etc/pki + - name: etcconfig + secret: + secretName: 
openstackkey + updateStrategy: {} diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/openstack-secret.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/openstack-secret.yaml new file mode 100644 index 000000000..f0061a2ef --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/openstack-secret.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: openstackkey + namespace: testNamespace +data: + cloud.conf: YmFhYWFhYWQ= diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/serviceaccount.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/serviceaccount.yaml new file mode 100644 index 000000000..4b924605f --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/ccm/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-controller-manager + namespace: testNamespace diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/clusterrole.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/clusterrole.yaml new file mode 100644 index 000000000..1ead07241 --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/clusterrole.yaml @@ -0,0 +1,45 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: join-service + name: join-service +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - create + - update +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - create + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get +- apiGroups: + - "update.edgeless.systems" + resources: + - joiningnodes + verbs: + - get + - create + - update + - patch +- apiGroups: + - "update.edgeless.systems" + resources: + - nodeversions + verbs: + - get diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/clusterrolebinding.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..6e668f86b --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: join-service +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: join-service +subjects: +- kind: ServiceAccount + name: join-service + namespace: testNamespace diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/configmap.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/configmap.yaml new file mode 100644 index 000000000..4c445457a --- /dev/null +++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: join-config + namespace: testNamespace +data: + measurements: "{\"1\":{\"expected\":\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\",\"warnOnly\":false}}" +binaryData: + measurementSalt: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/daemonset.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/daemonset.yaml
new file mode 100644
index 000000000..d4e7d56e2
--- /dev/null
+++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/daemonset.yaml
@@ -0,0 +1,67 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: join-service
+  namespace: testNamespace
+  labels:
+    component: join-service
+    k8s-app: join-service
+    kubernetes.io/cluster-service: "true"
+spec:
+  selector:
+    matchLabels:
+      k8s-app: join-service
+  template:
+    metadata:
+      labels:
+        k8s-app: join-service
+    spec:
+      priorityClassName: system-cluster-critical
+      serviceAccountName: join-service
+      tolerations:
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+          operator: Exists
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/control-plane
+          operator: Exists
+        - effect: NoExecute
+          operator: Exists
+        - effect: NoSchedule
+          operator: Exists
+      nodeSelector:
+        node-role.kubernetes.io/control-plane: ""
+      containers:
+        - name: join-service
+          image: joinServiceImage
+          args:
+            - --cloud-provider=OpenStack
+            - --key-service-endpoint=key-service.testNamespace:9000
+            - --attestation-variant=dummy
+          volumeMounts:
+            - mountPath: /var/config
+              name: config
+              readOnly: true
+            - mountPath: /etc/kubernetes
+              name: kubeadm
+              readOnly: true
+          ports:
+            - containerPort: 9090
+              name: tcp
+          resources: {}
+          securityContext:
+            privileged: true
+      volumes:
+        - name: config
+          projected:
+            sources:
+              - configMap:
+                  name: join-config
+              - configMap:
+                  name: internal-config
+        - name: kubeadm
+          hostPath:
+            path: /etc/kubernetes
+  updateStrategy: {}
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/service.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/service.yaml
new file mode 100644
index 000000000..32bb4b31b
--- /dev/null
+++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: join-service
+  namespace: testNamespace
+spec:
+  type: NodePort
+  selector:
+    k8s-app: join-service
+  ports:
+    - name: grpc
+      protocol: TCP
+      port: 9090
+      targetPort: 9090
+      nodePort: 30090
+status:
+  loadBalancer: {}
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/serviceaccount.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/serviceaccount.yaml
new file mode 100644
index 000000000..fd9b52173
--- /dev/null
+++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/serviceaccount.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: join-service
+  namespace: testNamespace
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/clusterrole.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/clusterrole.yaml
new file mode 100644
index 000000000..da6b91c99
--- /dev/null
+++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/clusterrole.yaml
@@ -0,0 +1,13 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    k8s-app: key-service
+  name: key-service
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - get
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/clusterrolebinding.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/clusterrolebinding.yaml
new file mode 100644
index 000000000..0e3a0b6a8
--- /dev/null
+++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/clusterrolebinding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: key-service
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: key-service
+subjects:
+  - kind: ServiceAccount
+    name: key-service
+    namespace: testNamespace
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/daemonset.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/daemonset.yaml
new file mode 100644
index 000000000..e86cd502b
--- /dev/null
+++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/daemonset.yaml
@@ -0,0 +1,62 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    component: key-service
+    k8s-app: key-service
+    kubernetes.io/cluster-service: "true"
+  name: key-service
+  namespace: testNamespace
+spec:
+  selector:
+    matchLabels:
+      k8s-app: key-service
+  template:
+    metadata:
+      labels:
+        k8s-app: key-service
+    spec:
+      containers:
+        - name: key-service
+          image: keyServiceImage
+          args:
+            - --port=9000
+          volumeMounts:
+            - mountPath: /var/config
+              name: config
+              readOnly: true
+          resources: {}
+      nodeSelector:
+        node-role.kubernetes.io/control-plane: ""
+      priorityClassName: system-cluster-critical
+      serviceAccountName: key-service
+      tolerations:
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+          operator: Exists
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/control-plane
+          operator: Exists
+        - effect: NoExecute
+          operator: Exists
+        - effect: NoSchedule
+          operator: Exists
+      volumes:
+        - name: config
+          projected:
+            sources:
+              - configMap:
+                  items:
+                    - key: measurements
+                      path: measurements
+                  name: join-config
+              - secret:
+                  items:
+                    - key: mastersecret
+                      path: mastersecret
+                    - key: salt
+                      path: salt
+                  name: constellation-mastersecret
+  updateStrategy: {}
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/mastersecret.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/mastersecret.yaml
new file mode 100644
index 000000000..231c4329f
--- /dev/null
+++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/mastersecret.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  name: constellation-mastersecret
+  namespace: testNamespace
+data:
+  mastersecret: YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWE=
+  salt: YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWE=
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/service.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/service.yaml
new file mode 100644
index 000000000..e381d7f44
--- /dev/null
+++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: key-service
+  namespace: testNamespace
+spec:
+  ports:
+    - name: grpc
+      port: 9000
+      protocol: TCP
+      targetPort: 9000
+  selector:
+    k8s-app: key-service
+  type: ClusterIP
+status:
+  loadBalancer: {}
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/serviceaccount.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/serviceaccount.yaml
new file mode 100644
index 000000000..5659d4efa
--- /dev/null
+++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/key-service/templates/serviceaccount.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: key-service
+  namespace: testNamespace
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/konnectivity/templates/clusterrolebinding.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/konnectivity/templates/clusterrolebinding.yaml
new file mode 100644
index 000000000..f189cb6a3
--- /dev/null
+++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/konnectivity/templates/clusterrolebinding.yaml
@@ -0,0 +1,15 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+    kubernetes.io/cluster-service: "true"
+  name: system:konnectivity-server
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:auth-delegator
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: User
+  name: system:konnectivity-server
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/konnectivity/templates/daemonset.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/konnectivity/templates/daemonset.yaml
new file mode 100644
index 000000000..0f26cfbb9
--- /dev/null
+++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/konnectivity/templates/daemonset.yaml
@@ -0,0 +1,76 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+    k8s-app: konnectivity-agent
+  name: konnectivity-agent
+  namespace: testNamespace
+spec:
+  selector:
+    matchLabels:
+      k8s-app: konnectivity-agent
+  template:
+    metadata:
+      labels:
+        k8s-app: konnectivity-agent
+    spec:
+      containers:
+        - args:
+            - --logtostderr=true
+            - --proxy-server-host=127.0.0.1
+            - --ca-cert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+            - --proxy-server-port=8132
+            - --admin-server-port=8133
+            - --health-server-port=8134
+            - --service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token
+            - --agent-identifiers=host=$(HOST_IP)
+            - --sync-forever=true
+            - --keepalive-time=60m
+            - --sync-interval=5s
+            - --sync-interval-cap=30s
+            - --probe-interval=5s
+            - --v=3
+          command:
+            - /proxy-agent
+          env:
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: status.hostIP
+          image: konnectivityImage
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: 8134
+            initialDelaySeconds: 15
+            timeoutSeconds: 15
+          name: konnectivity-agent
+          resources: {}
+          volumeMounts:
+            - mountPath: /var/run/secrets/tokens
+              name: konnectivity-agent-token
+              readOnly: true
+      priorityClassName: system-cluster-critical
+      serviceAccountName: konnectivity-agent
+      tolerations:
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+          operator: Exists
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/control-plane
+          operator: Exists
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - effect: NoExecute
+          key: node.kubernetes.io/not-ready
+          operator: Exists
+      volumes:
+        - name: konnectivity-agent-token
+          projected:
+            sources:
+              - serviceAccountToken:
+                  audience: system:konnectivity-server
+                  path: konnectivity-agent-token
+  updateStrategy: {}
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/konnectivity/templates/serviceaccount.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/konnectivity/templates/serviceaccount.yaml
new file mode 100644
index 000000000..ad307c56f
--- /dev/null
+++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/konnectivity/templates/serviceaccount.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+    kubernetes.io/cluster-service: "true"
+  name: konnectivity-agent
+  namespace: testNamespace
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/verification-service/templates/daemonset.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/verification-service/templates/daemonset.yaml
new file mode 100644
index 000000000..bf97e74a1
--- /dev/null
+++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/verification-service/templates/daemonset.yaml
@@ -0,0 +1,51 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    component: verification-service
+    k8s-app: verification-service
+  name: verification-service
+  namespace: testNamespace
+spec:
+  selector:
+    matchLabels:
+      k8s-app: verification-service
+  template:
+    metadata:
+      labels:
+        k8s-app: verification-service
+    spec:
+      containers:
+        - args:
+            - --attestation-variant=dummy
+          image: verificationImage
+          name: verification-service
+          ports:
+            - containerPort: 8080
+              name: http
+            - containerPort: 9090
+              name: grpc
+          resources: {}
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - mountPath: /sys/kernel/security/
+              name: event-log
+              readOnly: true
+      tolerations:
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+          operator: Equal
+          value: "true"
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/control-plane
+          operator: Exists
+        - effect: NoExecute
+          operator: Exists
+        - effect: NoSchedule
+          operator: Exists
+      volumes:
+        - hostPath:
+            path: /sys/kernel/security/
+          name: event-log
+  updateStrategy: {}
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/verification-service/templates/loadbalancer-service.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/verification-service/templates/loadbalancer-service.yaml
new file mode 100644
index 000000000..d76218868
--- /dev/null
+++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/verification-service/templates/loadbalancer-service.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: verify
+  namespace: testNamespace
+spec:
+  allocateLoadBalancerNodePorts: false
+  externalIPs:
+    - 127.0.0.1
+  loadBalancerClass: constellation
+  ports:
+    - name: grpc
+      port: 30081
+      protocol: TCP
+      targetPort: 9090
+  selector:
+    k8s-app: verification-service
+  type: LoadBalancer
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/charts/verification-service/templates/nodeport-service.yaml b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/verification-service/templates/nodeport-service.yaml
new file mode 100644
index 000000000..8b9bb90f1
--- /dev/null
+++ b/cli/internal/helm/testdata/OpenStack/constellation-services/charts/verification-service/templates/nodeport-service.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: verification-service
+  namespace: testNamespace
+spec:
+  ports:
+    - name: http
+      nodePort: 30080
+      port: 8080
+      protocol: TCP
+      targetPort: 8080
+    - name: grpc
+      nodePort: 30081
+      port: 9090
+      protocol: TCP
+      targetPort: 9090
+  selector:
+    k8s-app: verification-service
+  type: NodePort
diff --git a/cli/internal/helm/testdata/OpenStack/constellation-services/templates/.gitkeep b/cli/internal/helm/testdata/OpenStack/constellation-services/templates/.gitkeep
new file mode 100644
index 000000000..e69de29bb