ci: adjust performance benchmark to run on different attestation variants (#3129)

* Create perf bench artifacts based on the attestation variant, not the CSP (see the naming sketch below)
* Enable perf bench on gcp-sev-snp, azure-tdx, and AWS

---------

Signed-off-by: Daniel Weiße <dw@edgeless.systems>
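The naming sketch referenced above, assuming an attestation variant of azure-tdx (the value is illustrative; the path patterns come from the action below):

    out/fio-constellation-azure-tdx.json               # raw FIO results
    out/knb-constellation-azure-tdx.json               # raw knb results
    benchmarks/constellation-azure-tdx.json            # parsed record, uploaded to S3 and indexed in OpenSearch as benchmarks-azure-tdx-<year>
    benchmarks/constellation-azure-tdx-previous.json   # previous record fetched from S3 for the regression check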
Daniel Weiße 2024-06-04 13:23:07 +02:00 committed by GitHub
parent 69048e430e
commit 66aa8a8d52
GPG Key ID: B5690EEEBB952194
6 changed files with 91 additions and 67 deletions

View File

@@ -5,7 +5,6 @@ inputs:
cloudProvider:
description: "Which cloud provider to use."
required: true
# TODO: Create different report depending on the attestation variant
attestationVariant:
description: "Which attestation variant to use."
required: true
@@ -56,18 +55,18 @@ runs:
ref: 67c64c854841165b778979375444da1c02e02210
path: k8s-bench-suite
- name: Run FIO benchmark without caching in Azure
if: inputs.cloudProvider == 'azure'
- name: Run FIO benchmark
shell: bash
env:
KUBECONFIG: ${{ inputs.kubeconfig }}
run: |
if [[ "${{ inputs.cloudProvider }}" == "azure" ]]
then
cat <<EOF | kubectl apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: encrypted-rwo-no-cache
name: fio-benchmark
allowVolumeExpansion: true
allowedTopologies: []
mountOptions: []
@@ -78,34 +77,47 @@ runs:
reclaimPolicy: Delete
volumeBindingMode: Immediate
EOF
mkdir -p out
kubestr fio -e "out/fio-constellation-${{ inputs.cloudProvider }}.json" -o json -s encrypted-rwo-no-cache -z 400Gi -f .github/actions/e2e_benchmark/fio.ini
fi
- name: Run FIO benchmark
if: inputs.cloudProvider == 'gcp'
shell: bash
env:
KUBECONFIG: ${{ inputs.kubeconfig }}
run: |
if [[ "${{ inputs.cloudProvider }}" == "gcp" ]]
then
cat <<EOF | kubectl apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: encrypted-balanced-rwo
name: fio-benchmark
provisioner: gcp.csi.confidential.cloud
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
type: pd-balanced
EOF
fi
if [[ "${{ inputs.cloudProvider }}" == "aws" ]]
then
cat <<EOF | kubectl apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: fio-benchmark
parameters:
type: gp3
provisioner: aws.csi.confidential.cloud
allowVolumeExpansion: true
reclaimPolicy: Delete
volumeBindingMode: Immediate
EOF
fi
mkdir -p out
kubestr fio -e "out/fio-constellation-${{ inputs.cloudProvider }}.json" -o json -s encrypted-balanced-rwo -z 400Gi -f .github/actions/e2e_benchmark/fio.ini
kubestr fio -e "out/fio-constellation-${{ inputs.attestationVariant }}.json" -o json -s fio-benchmark -z 400Gi -f .github/actions/e2e_benchmark/fio.ini
- name: Upload raw FIO benchmark results
if: (!env.ACT)
uses: ./.github/actions/artifact_upload
with:
path: "out/fio-constellation-${{ inputs.cloudProvider }}.json"
path: "out/fio-constellation-${{ inputs.attestationVariant }}.json"
name: "fio-constellation-${{ inputs.artifactNameSuffix }}.json"
encryptionSecret: ${{ inputs.encryptionSecret }}
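The per-CSP FIO steps above collapse into a single step: each branch applies a StorageClass named fio-benchmark for its CSP (now including AWS), so one shared kubestr invocation can follow. A minimal sketch of running it by hand against an existing cluster (the kubeconfig path and the azure-tdx value are assumptions):

    export KUBECONFIG=./constellation-admin.conf   # assumed path to the cluster's admin kubeconfig
    mkdir -p out
    kubestr fio -e "out/fio-constellation-azure-tdx.json" -o json \
      -s fio-benchmark -z 400Gi -f .github/actions/e2e_benchmark/fio.ini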
@@ -115,19 +127,19 @@ runs:
KUBECONFIG: ${{ inputs.kubeconfig }}
TERM: xterm-256color
run: |
workers="$(kubectl get nodes -o name | grep worker)"
workers="$(kubectl get nodes -o name -l '!node-role.kubernetes.io/control-plane')"
echo -e "Found workers:\n$workers"
server="$(echo "$workers" | tail +1 | head -1 | cut -d '/' -f2)"
echo "Server: $server"
client="$(echo "$workers" | tail +2 | head -1 | cut -d '/' -f2)"
echo "Client: $client"
k8s-bench-suite/knb -f "out/knb-constellation-${{ inputs.cloudProvider }}.json" -o json --server-node "$server" --client-node "$client"
k8s-bench-suite/knb -f "out/knb-constellation-${{ inputs.attestationVariant }}.json" -o json --server-node "$server" --client-node "$client"
- name: Upload raw knb benchmark results
if: (!env.ACT)
uses: ./.github/actions/artifact_upload
with:
path: "out/knb-constellation-${{ inputs.cloudProvider }}.json"
path: "out/knb-constellation-${{ inputs.attestationVariant }}.json"
name: "knb-constellation-${{ inputs.artifactNameSuffix }}.json"
encryptionSecret: ${{ inputs.encryptionSecret }}
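The worker lookup now selects nodes by label rather than by grepping node names. A commented sketch of the same selection pipeline, assuming two hypothetical worker nodes:

    workers="$(kubectl get nodes -o name -l '!node-role.kubernetes.io/control-plane')"
    # e.g. workers now holds (hypothetical names):
    #   node/constell-worker-0
    #   node/constell-worker-1
    server="$(echo "$workers" | tail +1 | head -1 | cut -d '/' -f2)"   # first line  -> constell-worker-0
    client="$(echo "$workers" | tail +2 | head -1 | cut -d '/' -f2)"   # second line -> constell-worker-1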
@@ -139,6 +151,7 @@ runs:
# Working directory containing the previous results as JSON and to contain the graphs
BDIR: benchmarks
CSP: ${{ inputs.cloudProvider }}
ATTESTATION_VARIANT: ${{ inputs.attestationVariant }}
run: |
mkdir -p benchmarks
python .github/actions/e2e_benchmark/evaluate/parse.py
@@ -148,7 +161,7 @@ runs:
uses: ./.github/actions/artifact_upload
with:
path: >
benchmarks/constellation-${{ inputs.cloudProvider }}.json
benchmarks/constellation-${{ inputs.attestationVariant }}.json
name: "benchmarks-${{ inputs.artifactNameSuffix }}"
encryptionSecret: ${{ inputs.encryptionSecret }}
@@ -166,12 +179,10 @@ runs:
- name: Get previous benchmark records from S3
shell: bash
env:
CSP: ${{ inputs.cloudProvider }}
run: |
aws s3 cp --recursive ${S3_PATH} ./ --no-progress
if [[ -f constellation-${CSP}.json ]]; then
mv constellation-${CSP}.json benchmarks/constellation-${CSP}-previous.json
if aws s3 cp "${S3_PATH}/constellation-${{ inputs.attestationVariant }}.json" ./ --no-progress
then
mv "constellation-${{ inputs.attestationVariant }}.json" "benchmarks/constellation-${{ inputs.attestationVariant }}-previous.json"
else
echo "::warning::Couldn't retrieve previous benchmark records from s3"
fi
@@ -180,15 +191,15 @@ runs:
shell: bash
env:
# Paths to benchmark results as JSON of the previous run and the current run
PREV_BENCH: benchmarks/constellation-${{ inputs.cloudProvider }}-previous.json
CURR_BENCH: benchmarks/constellation-${{ inputs.cloudProvider }}.json
PREV_BENCH: benchmarks/constellation-${{ inputs.attestationVariant }}-previous.json
CURR_BENCH: benchmarks/constellation-${{ inputs.attestationVariant }}.json
run: |
if [[ -f "$PREV_BENCH" ]]; then
# Fails if the results are outside the threshold range
python .github/actions/e2e_benchmark/evaluate/compare.py >> $GITHUB_STEP_SUMMARY
fi
- name: Upload benchmark results to opensearch
- name: Upload benchmark results to OpenSearch
if: (!env.ACT)
shell: bash
env:
@@ -198,14 +209,12 @@ runs:
run: |
curl -XPOST \
-u "${OPENSEARCH_USER}:${OPENSEARCH_PWD}" \
"${OPENSEARCH_DOMAIN}/benchmarks-${{ inputs.cloudProvider }}-$(date '+%Y')"/_doc \
--data-binary @benchmarks/constellation-${{ inputs.cloudProvider }}.json \
"${OPENSEARCH_DOMAIN}/benchmarks-${{ inputs.attestationVariant }}-$(date '+%Y')"/_doc \
--data-binary @benchmarks/constellation-${{ inputs.attestationVariant }}.json \
-H 'Content-Type: application/json'
- name: Update benchmark records in S3
if: github.ref_name == 'main'
shell: bash
env:
CSP: ${{ inputs.cloudProvider }}
run: |
aws s3 cp benchmarks/constellation-${CSP}.json ${S3_PATH}/constellation-${CSP}.json
aws s3 cp benchmarks/constellation-${{ inputs.attestationVariant }}.json ${S3_PATH}/constellation-${{ inputs.attestationVariant }}.json
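Taken together, the per-variant record lifecycle against S3 looks roughly like this (the azure-tdx value is illustrative; S3_PATH is configured elsewhere in the action):

    # fetch the previous record for this variant, if any
    aws s3 cp "${S3_PATH}/constellation-azure-tdx.json" ./ --no-progress \
      && mv constellation-azure-tdx.json benchmarks/constellation-azure-tdx-previous.json
    # compare against it; fails if results fall outside the threshold range
    PREV_BENCH=benchmarks/constellation-azure-tdx-previous.json \
    CURR_BENCH=benchmarks/constellation-azure-tdx.json \
    python .github/actions/e2e_benchmark/evaluate/compare.py
    # on main, publish the new record as the baseline for the next run
    aws s3 cp benchmarks/constellation-azure-tdx.json "${S3_PATH}/constellation-azure-tdx.json"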

View File

@@ -94,18 +94,18 @@ class BenchmarkComparer:
raise ValueError('Failed reading benchmark file: {e}'.format(e=e))
try:
name = bench_curr['provider']
name = bench_curr['attestationVariant']
except KeyError:
raise ValueError(
'Current benchmark record file does not contain provider.')
'Current benchmark record file does not contain attestationVariant.')
try:
prev_name = bench_prev['provider']
prev_name = bench_prev['attestationVariant']
except KeyError:
raise ValueError(
'Previous benchmark record file does not contain provider.')
'Previous benchmark record file does not contain attestationVariant.')
if name != prev_name:
raise ValueError(
'Cloud providers of previous and current benchmark data do not match.')
'Cloud attestationVariants of previous and current benchmark data do not match.')
if 'fio' not in bench_prev.keys() or 'fio' not in bench_curr.keys():
raise ValueError('Benchmarks do not both contain fio records.')
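compare.py now keys the comparison on the attestationVariant field instead of provider. A quick way to inspect the fields it relies on in a record, using jq (jq is not part of the workflow; the file name is illustrative):

    jq '{attestationVariant, fio: (.fio | keys), knb: (.knb | keys)}' \
      benchmarks/constellation-gcp-sev-snp.json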

View File

@@ -7,7 +7,7 @@ from datetime import datetime
from evaluators import fio, knb
def configure() -> Tuple[str, str, str, str | None, str, str, str, str]:
def configure() -> Tuple[str, str, str, str, str | None, str, str, str, str]:
"""Read the benchmark data paths.
Expects ENV vars (required):
@@ -25,27 +25,29 @@ def configure() -> Tuple[str, str, str, str | None, str, str, str, str]:
"""
base_path = os.environ.get('BENCH_RESULTS', None)
csp = os.environ.get('CSP', None)
attestation_variant = os.environ.get('ATTESTATION_VARIANT', None)
out_dir = os.environ.get('BDIR', None)
if not base_path or not csp or not out_dir:
if not base_path or not csp or not out_dir or not attestation_variant:
raise TypeError(
'ENV variables BENCH_RESULTS, CSP, BDIR are required.')
'ENV variables BENCH_RESULTS, CSP, BDIR, ATTESTATION_VARIANT are required.')
ext_provider_name = os.environ.get('EXT_NAME', None)
commit_hash = os.environ.get('GITHUB_SHA', 'N/A')
commit_ref = os.environ.get('GITHUB_REF_NAME', 'N/A')
actor = os.environ.get('GITHUB_ACTOR', 'N/A')
workflow = os.environ.get('GITHUB_WORKFLOW', 'N/A')
return base_path, csp, out_dir, ext_provider_name, commit_hash, commit_ref, actor, workflow
return base_path, csp, attestation_variant, out_dir, ext_provider_name, commit_hash, commit_ref, actor, workflow
class BenchmarkParser:
def __init__(self, base_path, csp, out_dir, ext_provider_name=None, commit_hash="N/A", commit_ref="N/A", actor="N/A", workflow="N/A"):
def __init__(self, base_path, csp, attestation_variant, out_dir, ext_provider_name=None, commit_hash="N/A", commit_ref="N/A", actor="N/A", workflow="N/A"):
self.base_path = base_path
self.csp = csp
self.attestation_variant = attestation_variant
self.out_dir = out_dir
self.ext_provider_name = ext_provider_name
if not self.ext_provider_name:
self.ext_provider_name = f'constellation-{csp}'
self.ext_provider_name = f'constellation-{attestation_variant}'
self.commit_hash = commit_hash
self.commit_ref = commit_ref
self.actor = actor
@@ -88,6 +90,7 @@ class BenchmarkParser:
},
'@timestamp': str(timestamp),
'provider': self.ext_provider_name,
'attestationVariant': self.attestation_variant,
'fio': {},
'knb': {}}
@@ -101,8 +104,8 @@ class BenchmarkParser:
def main():
base_path, csp, out_dir, ext_provider_name, commit_hash, commit_ref, actor, workflow = configure()
p = BenchmarkParser(base_path, csp, out_dir, ext_provider_name,
base_path, csp, attestation_variant, out_dir, ext_provider_name, commit_hash, commit_ref, actor, workflow = configure()
p = BenchmarkParser(base_path, csp, attestation_variant, out_dir, ext_provider_name,
commit_hash, commit_ref, actor, workflow)
p.parse()
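A sketch of how the parse step supplies the new parameter, with illustrative values for the environment variables the script reads (EXT_NAME stays optional; without it the record's provider defaults to constellation-<attestation variant>):

    export BENCH_RESULTS=out/                # directory holding the raw fio/knb JSON files
    export CSP=gcp                           # still required
    export ATTESTATION_VARIANT=gcp-sev-snp   # new: written into the record and used for naming
    export BDIR=benchmarks                   # output directory for the parsed record
    mkdir -p benchmarks
    python .github/actions/e2e_benchmark/evaluate/parse.py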

View File

@@ -7,7 +7,7 @@ size=10Gi
time_based=1
group_reporting
thread
cpus_allowed=1
cpus_allowed=0
[read_iops]

View File

@@ -231,23 +231,32 @@ jobs:
runner: "ubuntu-22.04"
clusterCreation: "cli"
# perf-bench test on latest k8s version, not supported on AWS
# perf-bench test on latest k8s version
- test: "perf-bench"
attestationVariant: "gcp-sev-es"
kubernetes-version: "v1.29"
runner: "ubuntu-22.04"
clusterCreation: "cli"
# Not yet supported on gcp-sev-snp
#- test: "perf-bench"
# attestationVariant: "gcp-sev-snp"
# kubernetes-version: "v1.29"
# runner: "ubuntu-22.04"
# clusterCreation: "cli"
- test: "perf-bench"
attestationVariant: "gcp-sev-snp"
kubernetes-version: "v1.29"
runner: "ubuntu-22.04"
clusterCreation: "cli"
- test: "perf-bench"
attestationVariant: "azure-sev-snp"
kubernetes-version: "v1.29"
runner: "ubuntu-22.04"
clusterCreation: "cli"
- test: "perf-bench"
attestationVariant: "azure-tdx"
kubernetes-version: "v1.29"
runner: "ubuntu-22.04"
clusterCreation: "cli"
- test: "perf-bench"
attestationVariant: "aws-sev-snp"
kubernetes-version: "v1.29"
runner: "ubuntu-22.04"
clusterCreation: "cli"
# s3proxy test on latest k8s version
- test: "s3proxy"

View File

@@ -241,29 +241,32 @@ jobs:
kubernetes-version: "v1.29"
clusterCreation: "cli"
# perf-bench test on latest k8s version, not supported on AWS
# perf-bench test on latest k8s version
- test: "perf-bench"
refStream: "ref/main/stream/debug/?"
attestationVariant: "gcp-sev-es"
kubernetes-version: "v1.29"
clusterCreation: "cli"
# Not yet supported on gcp-sev-snp
#- test: "perf-bench"
# refStream: "ref/main/stream/debug/?"
# attestationVariant: "gcp-sev-snp"
# kubernetes-version: "v1.29"
# clusterCreation: "cli"
- test: "perf-bench"
refStream: "ref/main/stream/debug/?"
attestationVariant: "gcp-sev-snp"
kubernetes-version: "v1.29"
clusterCreation: "cli"
- test: "perf-bench"
refStream: "ref/main/stream/debug/?"
attestationVariant: "azure-sev-snp"
kubernetes-version: "v1.29"
clusterCreation: "cli"
# TODO: check what needs to be done for perf-bench on Azure TDX
#- test: "perf-bench"
# refStream: "ref/main/stream/debug/?"
# attestationVariant: "azure-tdx"
# kubernetes-version: "v1.29"
# clusterCreation: "cli"
- test: "perf-bench"
refStream: "ref/main/stream/debug/?"
attestationVariant: "azure-tdx"
kubernetes-version: "v1.29"
clusterCreation: "cli"
- test: "perf-bench"
refStream: "ref/main/stream/debug/?"
attestationVariant: "aws-sev-snp"
kubernetes-version: "v1.29"
clusterCreation: "cli"
# s3proxy test on latest k8s version
- test: "s3proxy"