AB#2191 Add K-Bench CI step to manual workflow

Add the option to run K-Bench performance benchmarks to the manual CI workflow
Install CSI drivers in the cluster for K-Bench benchmarks
Attach the results to the workflow in the GitHub Actions view
Christoph Meyer 2022-11-01 11:23:01 +00:00 committed by cm
parent 67a99434e9
commit f4ff473677
8 changed files with 181 additions and 3 deletions
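
For reference, the new benchmark is selected via the manual workflow's test input. A minimal trigger sketch using the GitHub CLI, assuming the manual workflow file is named e2e-test-manual.yml and that cloudProvider is exposed as a workflow input (both names are assumptions, not taken from this diff):

    # hypothetical manual dispatch with the new k-bench test option
    gh workflow run e2e-test-manual.yml -f test="k-bench" -f cloudProvider="gcp"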


@@ -1,5 +1,5 @@
name: sonobuoy
description: "Executed the e2e test framework sonobuoy."
description: "Execute the e2e test framework sonobuoy."
inputs:
sonobuoyVersion:


@@ -40,7 +40,7 @@ inputs:
description: "The resource group to use"
required: false
test:
description: "The test to run. Can currently be one of [sonobuoy full, sonobuoy quick, autoscaling, nop]."
description: "The test to run. Can currently be one of [sonobuoy full, sonobuoy quick, autoscaling, k-bench, nop]."
required: true
sonobuoyTestSuiteCmd:
description: "The sonobuoy test suite to run."
@@ -50,7 +50,7 @@ runs:
using: "composite"
steps:
- name: Check input
if: ${{ !contains(fromJson('["sonobuoy full", "sonobuoy quick", "autoscaling", "nop"]'), inputs.test) }}
if: ${{ !contains(fromJson('["sonobuoy full", "sonobuoy quick", "autoscaling", "k-bench", "nop"]'), inputs.test) }}
shell: bash
run: |
echo "Invalid input for test field: ${{ inputs.test }}"
@@ -142,3 +142,10 @@ runs:
uses: ./.github/actions/e2e_autoscaling
with:
kubeconfig: ${{ steps.constellation-create.outputs.kubeconfig }}
- name: Run K-Bench
if: inputs.test == 'k-bench'
uses: ./.github/actions/k-bench
with:
cloudProvider: ${{ inputs.cloudProvider }}
kubeconfig: ${{ steps.constellation-create.outputs.kubeconfig }}

.github/actions/k-bench/action.yml (new file)

@@ -0,0 +1,106 @@
name: k-bench
description: "Run K-Bench"
inputs:
cloudProvider:
description: "Which cloud provider to use."
required: true
kubeconfig:
description: "The kubeconfig of the cluster to test."
required: true
runs:
using: "composite"
steps:
- name: Checkout patched K-Bench
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # tag=v3.1.0
with:
fetch-depth: 0
repository: "edgelesssys/k-bench"
ref: 'feat/constellation'
path: k-bench
- name: Install patched K-Bench
working-directory: "k-bench"
run: ./install.sh
shell: bash
- name: Install Constellation GCP CSI driver and create storage class
if: ${{ inputs.cloudProvider == 'gcp' }}
shell: bash
run: |
kubectl apply -k github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver/deploy/kubernetes/overlays/edgeless/latest
kubectl wait -n kube-system deployments csi-gce-pd-controller --for condition=available
kubectl apply -f .github/actions/k-bench/gcp_sc.yml
env:
KUBECONFIG: ${{ inputs.kubeconfig }}
- name: Install Constellation Azure CSI driver and create storage class
if: ${{ inputs.cloudProvider == 'azure' }}
shell: bash
run: |
helm install azuredisk-csi-driver https://raw.githubusercontent.com/edgelesssys/constellation-azuredisk-csi-driver/main/charts/edgeless/latest/azuredisk-csi-driver.tgz --namespace kube-system --set linux.distro=fedora --set controller.replicas=1
kubectl wait -n kube-system deployments csi-azuredisk-controller --for condition=available --timeout=300s
kubectl apply -f .github/actions/k-bench/azure_sc.yml
env:
KUBECONFIG: ${{ inputs.kubeconfig }}
- name: Run K-Bench
shell: bash
working-directory: k-bench
run: |
mkdir -p ./out
kubectl create namespace kbench-pod-namespace --dry-run=client -o yaml | kubectl apply -f -
kubectl apply -f ../.github/actions/k-bench/encrypted_storage.yml
./run.sh -r "kbench-constellation-${{ inputs.cloudProvider }}" -t "default" -o "./out/"
kubectl delete namespace kbench-pod-namespace --wait=true || true
kubectl create namespace kbench-pod-namespace --dry-run=client -o yaml | kubectl apply -f -
kubectl apply -f ../.github/actions/k-bench/encrypted_storage.yml
./run.sh -r "kbench-constellation-${{ inputs.cloudProvider }}" -t "dp_fio" -o "./out/"
kubectl delete namespace kbench-pod-namespace --wait=true || true
kubectl create namespace kbench-pod-namespace --dry-run=client -o yaml | kubectl apply -f -
kubectl apply -f ../.github/actions/k-bench/encrypted_storage.yml
./run.sh -r "kbench-constellation-${{ inputs.cloudProvider }}" -t "dp_network_internode" -o "./out/"
kubectl delete namespace kbench-pod-namespace --wait=true || true
kubectl create namespace kbench-pod-namespace --dry-run=client -o yaml | kubectl apply -f -
kubectl apply -f ../.github/actions/k-bench/encrypted_storage.yml
./run.sh -r "kbench-constellation-${{ inputs.cloudProvider }}" -t "dp_network_intranode" -o "./out/"
env:
KUBECONFIG: ${{ inputs.kubeconfig }}
- name: Delete namespace and PVCs
shell: bash
run: |
ELAPSED=0
kubectl delete namespace kbench-pod-namespace --wait=true || true
echo "::group::Wait for PV deletion"
until [ "$(kubectl get pv -o json | jq '.items | length')" == "0" ] || [ $ELAPSED -gt $PV_DELETION_TIMEOUT ];
do
echo $(kubectl get pv -o json | jq '.items | length') PV remaining..
sleep 1
ELAPSED=$((ELAPSED+1))
done
if [ $ELAPSED -gt $PV_DELETION_TIMEOUT ]; then
echo "Timed out waiting for PV deletion.."
exit 1
fi
echo "::endgroup::"
env:
KUBECONFIG: ${{ inputs.kubeconfig }}
PV_DELETION_TIMEOUT: "120" # 2 minutes timeout for pv deletion
- name: Merge K-Bench results
working-directory: k-bench
shell: bash
run: |
mkdir -p "./out/kbench-constellation-${{ inputs.cloudProvider }}"
mv ./out/results_kbench-constellation-${{ inputs.cloudProvider }}_*m/* "./out/kbench-constellation-${{ inputs.cloudProvider }}/"
ls -l "./out/kbench-constellation-${{ inputs.cloudProvider }}"
cat ./out/kbench-constellation-${{ inputs.cloudProvider }}/*/kbench.log
- name: Upload original benchmark results
uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # tag=v3.1.0
if: ${{ !env.ACT }}
with:
path: "k-bench/out/kbench-constellation-${{ inputs.cloudProvider }}"
name: "k-bench-constellation-${{ inputs.cloudProvider }}"

.github/actions/k-bench/azure_sc.yml (new file)

@@ -0,0 +1,12 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: encrypted-storage
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: azuredisk.csi.confidential.cloud
parameters:
skuName: StandardSSD_LRS
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer

.github/actions/k-bench/encrypted_storage.yml (new file; path inferred from the kubectl apply steps above)

@@ -0,0 +1,39 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: fio-block-pvc
namespace: kbench-pod-namespace
spec:
accessModes:
- ReadWriteOnce
storageClassName: encrypted-storage
resources:
requests:
storage: 10Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: netperfserver-pvc
namespace: kbench-pod-namespace
spec:
accessModes:
- ReadWriteOnce
storageClassName: encrypted-storage
resources:
requests:
storage: 10Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: netperfclient-pvc
namespace: kbench-pod-namespace
spec:
accessModes:
- ReadWriteOnce
storageClassName: encrypted-storage
resources:
requests:
storage: 10Gi
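
Outside CI, these claims can be applied and inspected directly; a minimal sketch, assuming KUBECONFIG already points at a cluster with one of the encrypted-storage classes from this commit installed. Because the class uses volumeBindingMode: WaitForFirstConsumer, the claims stay Pending until K-Bench schedules pods that mount them:

    kubectl create namespace kbench-pod-namespace --dry-run=client -o yaml | kubectl apply -f -
    kubectl apply -f .github/actions/k-bench/encrypted_storage.yml
    # claims remain Pending until a consuming pod is scheduled
    kubectl get pvc -n kbench-pod-namespace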

.github/actions/k-bench/gcp_sc.yml (new file)

@@ -0,0 +1,12 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: encrypted-storage
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: gcp.csi.confidential.cloud
parameters:
type: pd-standard
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
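
Whether the class was registered and picked up as the cluster default can be checked with kubectl; a short sketch:

    # the default class is flagged as "(default)" next to its name
    kubectl get storageclass encrypted-storage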


@@ -26,6 +26,7 @@ on:
- "sonobuoy quick"
- "sonobuoy full"
- "autoscaling"
- "k-bench"
- "nop"
required: true
kubernetesVersion:


@@ -26,6 +26,7 @@ on:
- "sonobuoy quick"
- "sonobuoy full"
- "autoscaling"
- "k-bench"
- "nop"
required: true
kubernetesVersion: