s3proxy: initial e2e tests and workflows

Otto Bittner 2023-10-12 13:27:02 +02:00
parent 76d7d30245
commit a19227cac9
9 changed files with 302 additions and 30 deletions

.github/actions/e2e_s3proxy/action.yml (new file)

@@ -0,0 +1,76 @@
name: E2E Test s3proxy
description: "Test the s3proxy."

inputs:
  kubeconfig:
    description: "Kubeconfig to access target cluster"
    required: true
  s3AccessKey:
    description: "Access key for s3proxy"
    required: true
  s3SecretKey:
    description: "Secret key for s3proxy"
    required: true
  buildBuddyApiKey:
    description: "BuildBuddy API key"
    required: true
  githubToken:
    description: "GitHub token"
    required: true

runs:
  using: "composite"
  steps:
    - name: Setup bazel
      uses: ./.github/actions/setup_bazel_nix
      with:
        useCache: "true"
        buildBuddyApiKey: ${{ inputs.buildBuddyApiKey }}
    - name: Get pseudoversion
      id: pseudoversion
      shell: bash
      run: |
        bazel build //bazel/settings:tag
        echo pseudoversion=$(cat ./bazel-bin/bazel/settings/_tag.tags.txt) | tee -a "$GITHUB_OUTPUT"
    - name: Log in to the Container registry
      uses: ./.github/actions/container_registry_login
      with:
        registry: ghcr.io
        username: ${{ github.actor }}
        password: ${{ inputs.githubToken }}
    - name: Build and push s3proxy image
      id: s3proxybuild
      shell: bash
      run: |
        bazel run //bazel/release:s3proxy_push
        echo s3proxyImage=$(cat ./bazel-bin/bazel/release/s3proxy_tag.txt) | tee -a "$GITHUB_OUTPUT"
    - name: Setup s3proxy
      shell: bash
      env:
        KUBECONFIG: ${{ inputs.kubeconfig }}
        S3_PROXY_IMAGE: ${{ steps.s3proxybuild.outputs.s3proxyImage }}
        AWS_ACCESS_KEY_ID: ${{ inputs.s3AccessKey }}
        AWS_SECRET_ACCESS_KEY: ${{ inputs.s3SecretKey }}
      run: |
        sed -i "s|AWS_ACCESS_KEY_ID: \"replaceme\"|AWS_ACCESS_KEY_ID: \"${AWS_ACCESS_KEY_ID}\"|g" s3proxy/deploy/deployment-s3proxy.yaml
        sed -i "s|AWS_SECRET_ACCESS_KEY: \"replaceme\"|AWS_SECRET_ACCESS_KEY: \"${AWS_SECRET_ACCESS_KEY}\"|g" s3proxy/deploy/deployment-s3proxy.yaml
        sed -i "s|image: ghcr.io/edgelesssys/constellation/s3proxy:v.*|image: \"${S3_PROXY_IMAGE}\"|g" s3proxy/deploy/deployment-s3proxy.yaml
        yq eval -i '(select(.spec.template.spec.containers.[].name == "s3proxy").spec.template.spec.containers.[].args) = ["--level=-1","--allow-multipart"]' s3proxy/deploy/deployment-s3proxy.yaml
        kubectl apply -f s3proxy/deploy/deployment-s3proxy.yaml
    - name: Run mint
      shell: bash
      env:
        KUBECONFIG: ${{ inputs.kubeconfig }}
        ACCESS_KEY: ${{ inputs.s3AccessKey }}
        SECRET_KEY: ${{ inputs.s3SecretKey }}
        IMAGE: "ghcr.io/edgelesssys/mint:v1.99.0@sha256:96a059733087ec0bcf2c808406a626da2ffefe8e7c7cac786907b1b35b892234" # renovate:mint-fork
      run: |
        ./s3proxy/e2e/deploy.sh "$IMAGE"


@@ -79,6 +79,10 @@ inputs:
  selfManagedInfra:
    description: "Use self-managed infrastructure instead of infrastructure created by the Constellation CLI."
    default: "false"
  s3AccessKey:
    description: "Access key for s3proxy"
  s3SecretKey:
    description: "Secret key for s3proxy"

outputs:
  kubeconfig:
@@ -92,7 +96,7 @@ runs:
using: "composite"
steps:
- name: Check input
if: (!contains(fromJson('["sonobuoy full", "sonobuoy quick", "autoscaling", "perf-bench", "verify", "lb", "recover", "malicious join", "nop", "upgrade"]'), inputs.test))
if: (!contains(fromJson('["sonobuoy full", "sonobuoy quick", "autoscaling", "perf-bench", "verify", "lb", "recover", "malicious join", "s3proxy", "nop", "upgrade"]'), inputs.test))
shell: bash
run: |
echo "::error::Invalid input for test field: ${{ inputs.test }}"
@@ -356,3 +360,13 @@ runs:
        cloudProvider: ${{ inputs.cloudProvider }}
        kubeconfig: ${{ steps.constellation-create.outputs.kubeconfig }}
        githubToken: ${{ inputs.githubToken }}
    - name: Run s3proxy e2e test
      if: inputs.test == 's3proxy'
      uses: ./.github/actions/e2e_s3proxy
      with:
        kubeconfig: ${{ steps.constellation-create.outputs.kubeconfig }}
        s3AccessKey: ${{ inputs.s3AccessKey }}
        s3SecretKey: ${{ inputs.s3SecretKey }}
        buildBuddyApiKey: ${{ inputs.buildBuddyApiKey }}
        githubToken: ${{ inputs.githubToken }}


@@ -169,6 +169,12 @@ jobs:
runner: "ubuntu-22.04"
selfManagedInfra: "true"
# s3proxy test on latest k8s version
- test: "s3proxy"
refStream: "ref/main/stream/debug/?"
provider: "gcp"
kubernetes-version: "v1.28"
#
# Tests on macOS runner
#
@@ -232,6 +238,8 @@ jobs:
          cosignPrivateKey: ${{ secrets.COSIGN_PRIVATE_KEY }}
          githubToken: ${{ secrets.GITHUB_TOKEN }}
          selfManagedInfra: ${{ matrix.selfManagedInfra == 'true' }}
          s3AccessKey: ${{ secrets.AWS_ACCESS_KEY_ID_S3PROXY }}
          s3SecretKey: ${{ secrets.AWS_SECRET_ACCESS_KEY_S3PROXY }}
      - name: Always terminate cluster
        if: always()


@@ -189,6 +189,12 @@ jobs:
            kubernetes-version: "v1.28"
            selfManagedInfra: "true"
          # s3proxy test on latest k8s version
          - test: "s3proxy"
            refStream: "ref/main/stream/debug/?"
            provider: "gcp"
            kubernetes-version: "v1.28"
          #
          # Tests on release-stable refStream
          #
@@ -251,6 +257,8 @@ jobs:
          fetchMeasurements: ${{ matrix.refStream != 'ref/release/stream/stable/?' }}
          azureSNPEnforcementPolicy: ${{ matrix.azureSNPEnforcementPolicy }}
          selfManagedInfra: ${{ matrix.selfManagedInfra == 'true' }}
          s3AccessKey: ${{ secrets.AWS_ACCESS_KEY_ID_S3PROXY }}
          s3SecretKey: ${{ secrets.AWS_SECRET_ACCESS_KEY_S3PROXY }}
      - name: Always terminate cluster
        if: always()


@@ -35,6 +35,7 @@ on:
- "verify"
- "recover"
- "malicious join"
- "s3proxy"
- "nop"
required: true
kubernetesVersion:
@@ -270,6 +271,8 @@ jobs:
          fetchMeasurements: ${{ contains(needs.find-latest-image.outputs.image, '/stream/stable/') }}
          internalLoadBalancer: ${{ inputs.internalLoadBalancer }}
          selfManagedInfra: ${{ inputs.selfManagedInfra }}
          s3AccessKey: ${{ secrets.AWS_ACCESS_KEY_ID_S3PROXY }}
          s3SecretKey: ${{ secrets.AWS_SECRET_ACCESS_KEY_S3PROXY }}
      - name: Always terminate cluster
        if: always()

.gitignore

@@ -69,3 +69,7 @@ __pycache__/
# nix
/result

# s3proxy misc files
port-forward.log
s3proxy-ca.crt

s3proxy/deploy/deployment-s3proxy.yaml

@@ -72,7 +72,7 @@ spec:
    spec:
      containers:
        - name: s3proxy
-         image: ghcr.io/edgelesssys/constellation/s3proxy:v2.12.0
+         image: ghcr.io/edgelesssys/constellation/s3proxy:v2.13.0-pre
          args:
            - "--level=-1"
          ports:
ports:
@ -100,6 +100,8 @@ apiVersion: v1
kind: Service
metadata:
name: s3proxy-service
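  # The deploy.sh e2e helper polls for a Service carrying this label before starting the mint job.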
  labels:
    app: s3proxy
spec:
  selector:
    app: s3proxy

s3proxy/e2e/clear.sh (new executable file)

@@ -0,0 +1,47 @@
#!/usr/bin/env bash

# This script can be used to remove buckets from S3.
# It empties the buckets and then removes them.
# The script is expected to print some errors, e.g. "Bucket is missing Object Lock Configuration"
# or "Invalid type for parameter Delete.Objects, value: None [..]". These can be ignored:
# the first occurs if a bucket does not have object lock enabled, the second if a bucket is
# already empty. In both cases the bucket is empty and can be removed.
#
# Usage: ./clear.sh <prefix>
# The prefix is required, as otherwise all buckets would be removed.
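# Example (hypothetical prefix): ./clear.sh mint-test-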

readonly prefix=$1

if [ -z "$prefix" ]; then
  echo "Usage: $0 <prefix>"
  echo "WARNING: Without a prefix, all buckets would be destroyed."
  exit 1
fi

# Disable the AWS CLI pager for this script; restore it on completion.
restore_aws_pager="${AWS_PAGER-}"
export AWS_PAGER=""
function empty_bucket() {
  # List all object versions in the bucket.
  versions=$(aws s3api list-object-versions --bucket "$1" --output=json --query='{Objects: Versions[].{Key:Key,VersionId:VersionId}}')

  # Remove all legal holds so the objects can be deleted.
  echo "$versions" | jq -r '.Objects[]?.Key' | while read -r key; do
    aws s3api put-object-legal-hold --bucket "$1" --key "$key" --legal-hold Status=OFF
  done

  # Delete all object versions.
  aws s3api delete-objects --bucket "$1" --delete "$versions" || true

  # List all delete markers in the bucket and remove them.
  markers=$(aws s3api list-object-versions --bucket "$1" --output=json --query='{Objects: DeleteMarkers[].{Key:Key,VersionId:VersionId}}')
  aws s3api delete-objects --bucket "$1" --delete "$markers" || true
}

for bucket in $(aws s3api list-buckets --query "Buckets[?starts_with(Name, '${prefix}')].Name" --output text); do
  empty_bucket "$bucket"
  aws s3 rb s3://"$bucket"
done

export AWS_PAGER="$restore_aws_pager"

s3proxy/e2e/deploy.sh (new executable file)

@@ -0,0 +1,110 @@
#!/bin/bash

function terminate_mint() {
  # shellcheck disable=SC2317
  kubectl logs job/mint-deploy
  # shellcheck disable=SC2317
  kubectl delete job mint-deploy
}

if [[ ! $1 =~ ^ghcr.io/edgelesssys/mint:v.*$ ]]; then
  echo "Error: invalid tag, expected input to match pattern '^ghcr.io/edgelesssys/mint:v.*$'"
  exit 1
fi

if [[ -z $KUBECONFIG ]]; then
  echo "Error: KUBECONFIG environment variable not set"
  exit 1
fi

if [[ -z $ACCESS_KEY ]]; then
  echo "Error: ACCESS_KEY environment variable not set"
  exit 1
fi

if [[ -z $SECRET_KEY ]]; then
  echo "Error: SECRET_KEY environment variable not set"
  exit 1
fi

# Wait for the s3proxy service to be created. kubectl wait cannot wait for resources to be created.
start_time=$(date +%s)
timeout=300
while true; do
  if [[ -n "$(kubectl get svc -l app=s3proxy -o jsonpath='{.items[*]}')" ]]; then
    echo "Service with label app=s3proxy found"
    service_ip=$(kubectl get svc s3proxy-service -o=jsonpath='{.spec.clusterIP}')
    break
  fi

  current_time=$(date +%s)
  elapsed_time=$((current_time - start_time))
  if [[ $elapsed_time -ge $timeout ]]; then
    echo "Timeout waiting for service with label app=s3proxy"
    exit 1
  fi

  echo "Waiting for service with label app=s3proxy"
  sleep 5
done

kubectl delete job mint-deploy --ignore-not-found=true

cat << EOF | kubectl apply -f -
apiVersion: batch/v1
kind: Job
metadata:
  name: mint-deploy
spec:
  template:
    metadata:
      name: mint-deploy
    spec:
      restartPolicy: Never
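      # Resolve the AWS S3 endpoint hostname to the s3proxy service's cluster IP inside
      # the pod, so mint's S3 traffic is transparently routed through the proxy.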
      hostAliases:
        - ip: "$service_ip"
          hostnames:
            - "s3.eu-west-1.amazonaws.com"
      containers:
        - name: mint
          image: "$1"
          args:
            - "aws-sdk-go"
            - "versioning"
          volumeMounts:
            - name: ca-cert
              mountPath: /etc/ssl/certs/kube-ca.crt
              subPath: kube-ca.crt
          env:
            - name: SERVER_REGION
              value: eu-west-1
            - name: SERVER_ENDPOINT
              value: s3.eu-west-1.amazonaws.com:443
            - name: ENABLE_HTTPS
              value: "1"
            - name: AWS_CA_BUNDLE
              value: /etc/ssl/certs/kube-ca.crt
            - name: ACCESS_KEY
              value: "$ACCESS_KEY"
            - name: SECRET_KEY
              value: "$SECRET_KEY"
      volumes:
        - name: ca-cert
          secret:
            secretName: s3proxy-tls
            items:
              - key: ca.crt
                path: kube-ca.crt
EOF

# Print the job's logs and remove it before this script finishes.
trap "terminate_mint" EXIT

# Tests have to complete within 10 minutes, otherwise they are considered failed.
if kubectl wait --for=condition=complete job/mint-deploy --timeout=600s; then
  echo "Mint tests completed successfully"
  exit 0
else
  echo "Mint tests failed"
  exit 1
fi
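
For reference, the composite action above invokes this script roughly as follows (kubeconfig path and credentials are placeholders):

KUBECONFIG=/path/to/kubeconfig \
  ACCESS_KEY=<s3AccessKey> \
  SECRET_KEY=<s3SecretKey> \
  ./s3proxy/e2e/deploy.sh "ghcr.io/edgelesssys/mint:v1.99.0@sha256:96a059733087ec0bcf2c808406a626da2ffefe8e7c7cac786907b1b35b892234"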